affinity-sdk 0.9.5 (affinity_sdk-0.9.5-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. affinity/__init__.py +139 -0
  2. affinity/cli/__init__.py +7 -0
  3. affinity/cli/click_compat.py +27 -0
  4. affinity/cli/commands/__init__.py +1 -0
  5. affinity/cli/commands/_entity_files_dump.py +219 -0
  6. affinity/cli/commands/_list_entry_fields.py +41 -0
  7. affinity/cli/commands/_v1_parsing.py +77 -0
  8. affinity/cli/commands/company_cmds.py +2139 -0
  9. affinity/cli/commands/completion_cmd.py +33 -0
  10. affinity/cli/commands/config_cmds.py +540 -0
  11. affinity/cli/commands/entry_cmds.py +33 -0
  12. affinity/cli/commands/field_cmds.py +413 -0
  13. affinity/cli/commands/interaction_cmds.py +875 -0
  14. affinity/cli/commands/list_cmds.py +3152 -0
  15. affinity/cli/commands/note_cmds.py +433 -0
  16. affinity/cli/commands/opportunity_cmds.py +1174 -0
  17. affinity/cli/commands/person_cmds.py +1980 -0
  18. affinity/cli/commands/query_cmd.py +444 -0
  19. affinity/cli/commands/relationship_strength_cmds.py +62 -0
  20. affinity/cli/commands/reminder_cmds.py +595 -0
  21. affinity/cli/commands/resolve_url_cmd.py +127 -0
  22. affinity/cli/commands/session_cmds.py +84 -0
  23. affinity/cli/commands/task_cmds.py +110 -0
  24. affinity/cli/commands/version_cmd.py +29 -0
  25. affinity/cli/commands/whoami_cmd.py +36 -0
  26. affinity/cli/config.py +108 -0
  27. affinity/cli/context.py +749 -0
  28. affinity/cli/csv_utils.py +195 -0
  29. affinity/cli/date_utils.py +42 -0
  30. affinity/cli/decorators.py +77 -0
  31. affinity/cli/errors.py +28 -0
  32. affinity/cli/field_utils.py +355 -0
  33. affinity/cli/formatters.py +551 -0
  34. affinity/cli/help_json.py +283 -0
  35. affinity/cli/logging.py +100 -0
  36. affinity/cli/main.py +261 -0
  37. affinity/cli/options.py +53 -0
  38. affinity/cli/paths.py +32 -0
  39. affinity/cli/progress.py +183 -0
  40. affinity/cli/query/__init__.py +163 -0
  41. affinity/cli/query/aggregates.py +357 -0
  42. affinity/cli/query/dates.py +194 -0
  43. affinity/cli/query/exceptions.py +147 -0
  44. affinity/cli/query/executor.py +1236 -0
  45. affinity/cli/query/filters.py +248 -0
  46. affinity/cli/query/models.py +333 -0
  47. affinity/cli/query/output.py +331 -0
  48. affinity/cli/query/parser.py +619 -0
  49. affinity/cli/query/planner.py +430 -0
  50. affinity/cli/query/progress.py +270 -0
  51. affinity/cli/query/schema.py +439 -0
  52. affinity/cli/render.py +1589 -0
  53. affinity/cli/resolve.py +222 -0
  54. affinity/cli/resolvers.py +249 -0
  55. affinity/cli/results.py +308 -0
  56. affinity/cli/runner.py +218 -0
  57. affinity/cli/serialization.py +65 -0
  58. affinity/cli/session_cache.py +276 -0
  59. affinity/cli/types.py +70 -0
  60. affinity/client.py +771 -0
  61. affinity/clients/__init__.py +19 -0
  62. affinity/clients/http.py +3664 -0
  63. affinity/clients/pipeline.py +165 -0
  64. affinity/compare.py +501 -0
  65. affinity/downloads.py +114 -0
  66. affinity/exceptions.py +615 -0
  67. affinity/filters.py +1128 -0
  68. affinity/hooks.py +198 -0
  69. affinity/inbound_webhooks.py +302 -0
  70. affinity/models/__init__.py +163 -0
  71. affinity/models/entities.py +798 -0
  72. affinity/models/pagination.py +513 -0
  73. affinity/models/rate_limit_snapshot.py +48 -0
  74. affinity/models/secondary.py +413 -0
  75. affinity/models/types.py +663 -0
  76. affinity/policies.py +40 -0
  77. affinity/progress.py +22 -0
  78. affinity/py.typed +0 -0
  79. affinity/services/__init__.py +42 -0
  80. affinity/services/companies.py +1286 -0
  81. affinity/services/lists.py +1892 -0
  82. affinity/services/opportunities.py +1330 -0
  83. affinity/services/persons.py +1348 -0
  84. affinity/services/rate_limits.py +173 -0
  85. affinity/services/tasks.py +193 -0
  86. affinity/services/v1_only.py +2445 -0
  87. affinity/types.py +83 -0
  88. affinity_sdk-0.9.5.dist-info/METADATA +622 -0
  89. affinity_sdk-0.9.5.dist-info/RECORD +92 -0
  90. affinity_sdk-0.9.5.dist-info/WHEEL +4 -0
  91. affinity_sdk-0.9.5.dist-info/entry_points.txt +2 -0
  92. affinity_sdk-0.9.5.dist-info/licenses/LICENSE +21 -0
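
The hunk below adds affinity/cli/commands/list_cmds.py in full (the +3152 entry above). A minimal sketch of the cursor-pagination pattern its commands wrap, using only calls visible in this diff (client.lists.pages, page.data, page.pagination.next_cursor); the bare client parameter stands in for an SDK client constructed elsewhere and is an assumption, not part of this file:

    def collect_all_lists(client) -> list:
        # Drain the paginator the way `list ls` below does; each page exposes
        # `.data` plus `.pagination.next_cursor` / `.prev_cursor`.
        rows = []
        for page in client.lists.pages(limit=100, cursor=None):
            rows.extend(page.data)
        return rows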
affinity/cli/commands/list_cmds.py
@@ -0,0 +1,3152 @@
+ from __future__ import annotations
+
+ import json
+ import sys
+ import time
+ import warnings as stdlib_warnings
+ from collections.abc import Callable
+ from contextlib import ExitStack
+ from typing import Any, Literal, cast
+
+ from rich.console import Console
+ from rich.progress import BarColumn, Progress, TaskID, TextColumn, TimeElapsedColumn
+
+ from affinity.filters import FilterExpression
+ from affinity.filters import parse as parse_filter
+ from affinity.models.entities import FieldMetadata, ListCreate, ListEntryWithEntity
+ from affinity.models.pagination import FilterStats
+ from affinity.models.types import ListType
+ from affinity.types import (
+     AnyFieldId,
+     CompanyId,
+     EnrichedFieldId,
+     FieldId,
+     FieldType,
+     ListEntryId,
+     ListId,
+     OpportunityId,
+     PersonId,
+ )
+
+ from ..click_compat import RichCommand, RichGroup, click
+ from ..context import CLIContext
+ from ..csv_utils import write_csv_to_stdout
+ from ..decorators import category, destructive
+ from ..errors import CLIError
+ from ..options import output_options
+ from ..render import format_duration
+ from ..resolve import (
+     list_all_saved_views,
+     list_fields_for_list,
+     resolve_list_selector,
+     resolve_saved_view,
+ )
+ from ..results import CommandContext
+ from ..runner import CommandOutput, run_command
+ from ..serialization import serialize_model_for_cli, serialize_models_for_cli
+
+
+ @click.group(name="list", cls=RichGroup)
+ def list_group() -> None:
+     """List commands."""
+
+
+ def _parse_list_type(value: str | None) -> ListType | None:
+     if value is None:
+         return None
+     value = value.lower()
+     if value in {"person", "people"}:
+         return ListType.PERSON
+     if value in {"company", "companies", "organization", "org"}:
+         return ListType.COMPANY
+     if value in {"opportunity", "opp"}:
+         return ListType.OPPORTUNITY
+     raise CLIError(f"Unknown list type: {value}", exit_code=2, error_type="usage_error")
+
+
+ @category("read")
+ @list_group.command(name="ls", cls=RichCommand)
+ @click.option("--type", "list_type", type=str, default=None, help="Filter by list type.")
+ @click.option("--page-size", "-s", type=int, default=None, help="Page size (limit).")
+ @click.option(
+     "--cursor", type=str, default=None, help="Resume from cursor (incompatible with --page-size)."
+ )
+ @click.option(
+     "--max-results", "--limit", "-n", type=int, default=None, help="Stop after N items total."
+ )
+ @click.option("--all", "-A", "all_pages", is_flag=True, help="Fetch all pages.")
+ @output_options
+ @click.pass_obj
+ def list_ls(
+     ctx: CLIContext,
+     *,
+     list_type: str | None,
+     page_size: int | None,
+     cursor: str | None,
+     max_results: int | None,
+     all_pages: bool,
+ ) -> None:
+     """
+     List all lists in the workspace.
+
+     Examples:
+
+     - `xaffinity list ls`
+     - `xaffinity list ls --type person`
+     - `xaffinity list ls --type company --all`
+     """
+
+     def fn(ctx: CLIContext, warnings: list[str]) -> CommandOutput:
+         client = ctx.get_client(warnings=warnings)
+         lt = _parse_list_type(list_type)
+
+         if cursor is not None and page_size is not None:
+             raise CLIError(
+                 "--cursor cannot be combined with --page-size.",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+
+         # Build CommandContext upfront for all return paths
+         ctx_modifiers: dict[str, object] = {}
+         if list_type:
+             ctx_modifiers["type"] = list_type
+         if page_size is not None:
+             ctx_modifiers["pageSize"] = page_size
+         if cursor is not None:
+             ctx_modifiers["cursor"] = cursor
+         if max_results is not None:
+             ctx_modifiers["maxResults"] = max_results
+         if all_pages:
+             ctx_modifiers["allPages"] = True
+
+         cmd_context = CommandContext(
+             name="list ls",
+             inputs={},
+             modifiers=ctx_modifiers,
+         )
+
+         pages = client.lists.pages(limit=page_size, cursor=cursor)
+         rows: list[dict[str, object]] = []
+         first_page = True
+
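+         # Progress policy: suppressed when ctx.progress == "never" or ctx.quiet,
+         # forced by "always", and otherwise shown only when stderr is a TTY.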
+         show_progress = (
+             ctx.progress != "never"
+             and not ctx.quiet
+             and (ctx.progress == "always" or sys.stderr.isatty())
+         )
+
+         with ExitStack() as stack:
+             progress: Progress | None = None
+             task_id: TaskID | None = None
+             if show_progress:
+                 progress = stack.enter_context(
+                     Progress(
+                         TextColumn("{task.description}"),
+                         BarColumn(),
+                         TextColumn("{task.completed} rows"),
+                         TimeElapsedColumn(),
+                         console=Console(file=sys.stderr),
+                         transient=True,
+                     )
+                 )
+                 task_id = progress.add_task("Fetching", total=max_results)
+
+             for page in pages:
+                 for idx, item in enumerate(page.data):
+                     if lt is not None and item.type != lt:
+                         continue
+                     rows.append(
+                         {
+                             "id": int(item.id),
+                             "name": item.name,
+                             "type": ListType(item.type).name.lower(),
+                             "ownerId": int(item.owner_id)
+                             if getattr(item, "owner_id", None)
+                             else None,
+                             "isPublic": getattr(item, "is_public", None),
+                         }
+                     )
+                     if progress and task_id is not None:
+                         progress.update(task_id, completed=len(rows))
+                     if max_results is not None and len(rows) >= max_results:
+                         stopped_mid_page = idx < (len(page.data) - 1)
+                         if stopped_mid_page:
+                             warnings.append(
+                                 "Results limited by --max-results. Use --all to fetch all results."
+                             )
+                         pagination = None
+                         if (
+                             page.pagination.next_cursor
+                             and not stopped_mid_page
+                             and page.pagination.next_cursor != cursor
+                         ):
+                             pagination = {
+                                 "lists": {
+                                     "nextCursor": page.pagination.next_cursor,
+                                     "prevCursor": page.pagination.prev_cursor,
+                                 }
+                             }
+                         return CommandOutput(
+                             data={"lists": rows[:max_results]},
+                             context=cmd_context,
+                             pagination=pagination,
+                             api_called=True,
+                         )
+
+                 if first_page and not all_pages and max_results is None:
+                     return CommandOutput(
+                         data={"lists": rows},
+                         context=cmd_context,
+                         pagination=(
+                             {
+                                 "lists": {
+                                     "nextCursor": page.pagination.next_cursor,
+                                     "prevCursor": page.pagination.prev_cursor,
+                                 }
+                             }
+                             if page.pagination.next_cursor
+                             else None
+                         ),
+                         api_called=True,
+                     )
+                 first_page = False
+
+         return CommandOutput(
+             data={"lists": rows},
+             context=cmd_context,
+             pagination=None,
+             api_called=True,
+         )
+
+     run_command(ctx, command="list ls", fn=fn)
+
+
+ @category("write")
+ @list_group.command(name="create", cls=RichCommand)
+ @click.option("--name", required=True, help="List name.")
+ @click.option("--type", "list_type", required=True, help="List type (person/company/opportunity).")
+ @click.option(
+     "--public/--private",
+     "is_public",
+     default=False,
+     help="Whether the list is public (default: private).",
+ )
+ @click.option("--owner-id", type=int, default=None, help="Owner id.")
+ @output_options
+ @click.pass_obj
+ def list_create(
+     ctx: CLIContext,
+     *,
+     name: str,
+     list_type: str,
+     is_public: bool,
+     owner_id: int | None,
+ ) -> None:
+     """
+     Create a new list.
+
+     Examples:
+
+     - `xaffinity list create --name "Prospects" --type company`
+     - `xaffinity list create --name "Candidates" --type person --public`
+     """
+
+     def fn(ctx: CLIContext, warnings: list[str]) -> CommandOutput:
+         lt = _parse_list_type(list_type)
+         if lt is None:
+             raise CLIError(
+                 "Missing list type.",
+                 exit_code=2,
+                 error_type="usage_error",
+                 hint="Use --type person|company|opportunity.",
+             )
+         client = ctx.get_client(warnings=warnings)
+         created = client.lists.create(
+             ListCreate(
+                 name=name,
+                 type=lt,
+                 is_public=is_public,
+                 owner_id=owner_id,
+             )
+         )
+
+         # Invalidate list-related caches after creation
+         cache = ctx.session_cache
+         cache.invalidate_prefix("list_resolve_")
+
+         # Build CommandContext for list create
+         ctx_modifiers: dict[str, object] = {"name": name, "type": list_type}
+         if is_public:
+             ctx_modifiers["isPublic"] = True
+         if owner_id is not None:
+             ctx_modifiers["ownerId"] = owner_id
+
+         cmd_context = CommandContext(
+             name="list create",
+             inputs={},
+             modifiers=ctx_modifiers,
+         )
+
+         payload = serialize_model_for_cli(created)
+         return CommandOutput(data={"list": payload}, context=cmd_context, api_called=True)
+
+     run_command(ctx, command="list create", fn=fn)
+
+
+ @category("read")
+ @list_group.command(name="get", cls=RichCommand)
+ @click.argument("list_selector", type=str)
+ @output_options
+ @click.pass_obj
+ def list_get(ctx: CLIContext, list_selector: str) -> None:
+     """
+     Get list details, fields, and saved views.
+
+     LIST_SELECTOR can be a list id or exact list name.
+
+     Examples:
+
+     - `xaffinity list get 12345`
+     - `xaffinity list get "Pipeline"`
+     """
+
+     def fn(ctx: CLIContext, warnings: list[str]) -> CommandOutput:
+         client = ctx.get_client(warnings=warnings)
+         cache = ctx.session_cache
+         resolved = resolve_list_selector(client=client, selector=list_selector, cache=cache)
+         list_id = ListId(int(resolved.list.id))
+         fields = list_fields_for_list(client=client, list_id=list_id, cache=cache)
+         views = list_all_saved_views(client=client, list_id=list_id, cache=cache)
+
+         # Extract resolved list name for context
+         ctx_resolved: dict[str, str] | None = None
+         list_resolved = resolved.resolved.get("list", {})
+         if isinstance(list_resolved, dict):
+             list_name = list_resolved.get("entityName")
+             if list_name:
+                 ctx_resolved = {"selector": str(list_name)}
+
+         cmd_context = CommandContext(
+             name="list get",
+             inputs={"selector": list_selector},
+             modifiers={},
+             resolved=ctx_resolved,
+         )
+
+         data = {
+             "list": serialize_model_for_cli(resolved.list),
+             "fields": serialize_models_for_cli(fields),
+             "savedViews": serialize_models_for_cli(views),
+         }
+         return CommandOutput(
+             data=data, context=cmd_context, resolved=resolved.resolved, api_called=True
+         )
+
+     run_command(ctx, command="list get", fn=fn)
+
+
+ CsvHeaderMode = Literal["names", "ids"]
+
+
+ ExpandChoice = Literal["people", "companies", "opportunities"]
+ CsvMode = Literal["flat", "nested"]
+ ExpandOnError = Literal["raise", "skip"]
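+ # These aliases mirror the click.Choice values declared on `list export` below.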
+
+
+ @category("read")
+ @list_group.command(name="export", cls=RichCommand)
+ @click.argument("list_selector", type=str)
+ @click.option("--saved-view", type=str, default=None, help="Saved view id or name.")
+ @click.option("--field", "fields", type=str, multiple=True, help="Field name or id (repeatable).")
+ @click.option(
+     "--filter",
+     "filter_expr",
+     type=str,
+     default=None,
+     help='Client-side filter (field op value). Quote multi-word: Status="Intro Meeting".',
+ )
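+ # Example (from the help text above): --filter 'Status="Intro Meeting"'.
+ # Matching runs client-side; a warning to that effect is emitted in fn() below.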
+ @click.option(
+     "--page-size", "-s", type=int, default=100, show_default=True, help="Page size (max 100)."
+ )
+ @click.option(
+     "--cursor", type=str, default=None, help="Resume from cursor (incompatible with --page-size)."
+ )
+ @click.option(
+     "--max-results", "--limit", "-n", type=int, default=None, help="Stop after N rows total."
+ )
+ @click.option("--all", "-A", "all_pages", is_flag=True, help="Fetch all rows.")
+ @click.option("--csv", "csv_flag", is_flag=True, help="Output as CSV (to stdout).")
+ @click.option(
+     "--csv-header",
+     type=click.Choice(["names", "ids"]),
+     default="names",
+     show_default=True,
+     help="Use field names or IDs for CSV headers.",
+ )
+ @click.option(
+     "--csv-bom",
+     is_flag=True,
+     help="Add UTF-8 BOM for Excel (use with redirection: --csv --csv-bom > file.csv).",
+ )
+ @click.option("--dry-run", is_flag=True, help="Validate selectors and print export plan.")
+ # Expand options (Phase 1)
+ @click.option(
+     "--expand",
+     "expand",
+     multiple=True,
+     type=click.Choice(["people", "companies", "opportunities"]),
+     help="Expand associated entities (repeatable).",
+ )
+ @click.option(
+     "--expand-max-results",
+     type=int,
+     default=100,
+     show_default=True,
+     help="Max associations per entry per type.",
+ )
+ @click.option(
+     "--expand-all",
+     is_flag=True,
+     help="Fetch all associations per entry (no limit).",
+ )
+ @click.option(
+     "--expand-on-error",
+     type=click.Choice(["raise", "skip"]),
+     default="raise",
+     show_default=True,
+     help="How to handle per-entry expansion errors.",
+ )
+ @click.option(
+     "--csv-mode",
+     type=click.Choice(["flat", "nested"]),
+     default="flat",
+     show_default=True,
+     help="CSV expansion format: flat (one row per association) or nested (JSON arrays).",
+ )
+ # Phase 4: --expand-fields and --expand-field-type for expanded entity fields
+ @click.option(
+     "--expand-fields",
+     "expand_fields",
+     multiple=True,
+     type=str,
+     help="Include specific field by name or ID in expanded entities (repeatable).",
+ )
+ @click.option(
+     "--expand-field-type",
+     "expand_field_types",
+     multiple=True,
+     type=click.Choice(["global", "enriched", "relationship-intelligence"], case_sensitive=False),
+     help="Include all fields of this type in expanded entities (repeatable).",
+ )
+ # Phase 5: --expand-filter and --expand-opportunities-list
+ @click.option(
+     "--expand-filter",
+     "expand_filter",
+     type=str,
+     default=None,
+     help="Filter expanded entities (e.g., 'field=value' or 'field!=value').",
+ )
+ @click.option(
+     "--expand-opportunities-list",
+     "expand_opps_list",
+     type=str,
+     default=None,
+     help="Scope --expand opportunities to a specific list (id or name).",
+ )
+ @output_options
+ @click.pass_obj
+ def list_export(
+     ctx: CLIContext,
+     list_selector: str,
+     *,
+     saved_view: str | None,
+     fields: tuple[str, ...],
+     filter_expr: str | None,
+     page_size: int,
+     cursor: str | None,
+     max_results: int | None,
+     all_pages: bool,
+     csv_flag: bool,
+     csv_header: CsvHeaderMode,
+     csv_bom: bool,
+     dry_run: bool,
+     # Expand options
+     expand: tuple[str, ...],
+     expand_max_results: int,
+     expand_all: bool,
+     expand_on_error: str,
+     csv_mode: str,
+     # Phase 4 options
+     expand_fields: tuple[str, ...],
+     expand_field_types: tuple[str, ...],
+     # Phase 5 options
+     expand_filter: str | None,
+     expand_opps_list: str | None,
+ ) -> None:
+     """
+     Export list entries to JSON or CSV.
+
+     LIST_SELECTOR can be a list id or exact list name.
+
+     Examples:
+
+     - `xaffinity list export "Pipeline" --all`
+     - `xaffinity list export 12345 --csv --all > pipeline.csv`
+     - `xaffinity list export "Pipeline" --saved-view "Active Deals" --csv > deals.csv`
+     - `xaffinity list export "Pipeline" --field Status --field "Deal Size" --all`
+     - `xaffinity list export "Pipeline" --expand people --all --csv > opps-with-people.csv`
+     - `xaffinity list export "Pipeline" --expand people --expand companies --all`
+     """
+
+     def fn(ctx: CLIContext, warnings: list[str]) -> CommandOutput:
+         # Check mutual exclusivity: --csv and --json
+         if csv_flag and ctx.output == "json":
+             raise CLIError(
+                 "--csv and --json are mutually exclusive.",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+
+         # Track start time for summary line
+         export_start_time = time.time()
+
+         # Parse and validate expand options early
+         expand_set = {e.strip().lower() for e in expand if e and e.strip()}
+         want_expand = len(expand_set) > 0
+
+         # Validate expand field options require --expand
+         if (expand_fields or expand_field_types) and not want_expand:
+             raise CLIError(
+                 "--expand-fields and --expand-field-type require --expand.",
+                 exit_code=2,
+                 error_type="usage_error",
+                 hint="Use --expand people/companies to expand entity associations.",
+             )
+
+         # Parse expand field types to FieldType enum
+         parsed_expand_field_types: list[FieldType] | None = None
+         if expand_field_types:
+             parsed_expand_field_types = []
+             for ft in expand_field_types:
+                 ft_lower = ft.strip().lower()
+                 if ft_lower == "global":
+                     parsed_expand_field_types.append(FieldType.GLOBAL)
+                 elif ft_lower == "enriched":
+                     parsed_expand_field_types.append(FieldType.ENRICHED)
+                 elif ft_lower == "relationship-intelligence":
+                     parsed_expand_field_types.append(FieldType.RELATIONSHIP_INTELLIGENCE)
+
+         # Note: expand_fields will be validated and resolved after client is obtained
+         # to enable name→ID resolution via API lookup
+
+         # Validate --expand-filter requires --expand (Phase 5)
+         if expand_filter and not want_expand:
+             raise CLIError(
+                 "--expand-filter requires --expand.",
+                 exit_code=2,
+                 error_type="usage_error",
+                 hint="Use --expand people/companies/opportunities to expand entity associations.",
+             )
+
+         # Validate --expand-opportunities-list requires --expand opportunities (Phase 5)
+         if expand_opps_list and "opportunities" not in expand_set:
+             raise CLIError(
+                 "--expand-opportunities-list requires --expand opportunities.",
+                 exit_code=2,
+                 error_type="usage_error",
+                 hint="Use --expand opportunities --expand-opportunities-list <list>.",
+             )
+
+         # Parse expand filter expression (Phase 5)
+         parsed_expand_filters: FilterExpression | None = None
+         if expand_filter:
+             try:
+                 parsed_expand_filters = parse_filter(expand_filter)
+             except ValueError as e:
+                 raise CLIError(
+                     f"Invalid expand filter: {e}",
+                     exit_code=2,
+                     error_type="usage_error",
+                     hint=(
+                         "Use 'field=value', 'field!=value', 'field=*' (not null), "
+                         "or 'field!=*' (is null). "
+                         "Combine with '|' (or) and '&' (and)."
+                     ),
+                 ) from e
+
+         if saved_view and filter_expr:
+             raise CLIError(
+                 "--saved-view and --filter are mutually exclusive.",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+         if cursor and (saved_view or filter_expr or fields):
+             raise CLIError(
+                 "--cursor cannot be combined with --saved-view/--filter/--field.",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+         if cursor and page_size != 100:
+             raise CLIError(
+                 "--cursor cannot be combined with --page-size (cursor encodes page size).",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+
+         if want_expand and cursor:
+             raise CLIError(
+                 "--cursor cannot be combined with --expand.",
+                 exit_code=2,
+                 error_type="usage_error",
+                 hint="For large exports, use streaming CSV output or the SDK with checkpointing.",
+             )
+
+         # Warn about client-side filtering (API doesn't support server-side filtering)
+         if filter_expr and not saved_view:
+             warnings.append(
+                 "The Affinity API does not support server-side filtering on list entries. "
+                 "Filtering is being applied client-side after fetching data. "
+                 "For large lists, consider using saved views instead (--saved-view)."
+             )
+
+         # Warn if both --expand-all and --expand-max-results specified
+         if expand_all and expand_max_results != 100:
+             warnings.append(
+                 f"--expand-all specified; ignoring --expand-max-results {expand_max_results}"
+             )
+
+         # Determine effective expansion limit
+         effective_expand_limit: int | None = None if expand_all else expand_max_results
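+         # None means no cap: --expand-all takes precedence over --expand-max-results.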
+
+         client = ctx.get_client(warnings=warnings)
+         cache = ctx.session_cache
+         resolved_list = resolve_list_selector(client=client, selector=list_selector, cache=cache)
+         list_id = ListId(int(resolved_list.list.id))
+         # Note: AffinityModel uses use_enum_values=True, so list.type is an int
+         list_type_value = resolved_list.list.type
+         list_type = (
+             ListType(list_type_value) if isinstance(list_type_value, int) else list_type_value
+         )
+         resolved: dict[str, Any] = dict(resolved_list.resolved)
+
+         # Extract resolved list name for CommandContext (string values only)
+         ctx_resolved: dict[str, str] | None = None
+         if resolved_list.list.name:
+             ctx_resolved = {"listId": resolved_list.list.name}
+
+         # Build CommandContext upfront (used by all return paths)
+         ctx_modifiers: dict[str, object] = {}
+         if saved_view:
+             ctx_modifiers["savedView"] = saved_view
+         if fields:
+             ctx_modifiers["fields"] = list(fields)
+         if filter_expr:
+             ctx_modifiers["filter"] = filter_expr
+         if page_size != 100:
+             ctx_modifiers["pageSize"] = page_size
+         if cursor:
+             ctx_modifiers["cursor"] = cursor
+         if max_results is not None:
+             ctx_modifiers["maxResults"] = max_results
+         if all_pages:
+             ctx_modifiers["all"] = True
+         if csv_flag:
+             ctx_modifiers["csv"] = True
+         if expand:
+             ctx_modifiers["expand"] = list(expand)
+         if dry_run:
+             ctx_modifiers["dryRun"] = True
+
+         cmd_context = CommandContext(
+             name="list export",
+             inputs={"listId": int(list_id)},
+             modifiers=ctx_modifiers,
+             resolved=ctx_resolved,
+         )
+
+         # Validate expand options for list type
+         if want_expand:
+             valid_expand_for_type: dict[ListType, set[str]] = {
+                 ListType.OPPORTUNITY: {"people", "companies"},
+                 ListType.PERSON: {"companies", "opportunities"},
+                 ListType.COMPANY: {"people", "opportunities"},
+             }
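+             # A list type can expand only into the other entity types, never its own.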
+             valid_for_this_type = valid_expand_for_type.get(list_type, set())
+             invalid_expands = expand_set - valid_for_this_type
+
+             if invalid_expands:
+                 raise CLIError(
+                     f"--expand {', '.join(sorted(invalid_expands))} is not valid for "
+                     f"{list_type.name.lower()} lists.",
+                     exit_code=2,
+                     error_type="usage_error",
+                     details={"validExpand": sorted(valid_for_this_type)},
+                     hint=f"Valid values for {list_type.name.lower()} lists: "
+                     f"{', '.join(sorted(valid_for_this_type))}.",
+                 )
+
+         # Validate and resolve --expand-fields (Phase 4 - Gap 4 fix)
+         # Uses API to fetch field metadata and validate field names/IDs
+         parsed_expand_fields: list[tuple[str, AnyFieldId]] | None = None
+         if expand_fields and want_expand:
+             parsed_expand_fields = _validate_and_resolve_expand_fields(
+                 client=client,
+                 expand_set=expand_set,
+                 field_specs=expand_fields,
+             )
+
+         # Resolve --expand-opportunities-list if provided (Phase 5)
+         resolved_opps_list_id: ListId | None = None
+         if expand_opps_list and "opportunities" in expand_set:
+             resolved_opps_list = resolve_list_selector(
+                 client=client, selector=expand_opps_list, cache=cache
+             )
+             # Validate it's an opportunity list
+             opps_list_type_value = resolved_opps_list.list.type
+             opps_list_type = (
+                 ListType(opps_list_type_value)
+                 if isinstance(opps_list_type_value, int)
+                 else opps_list_type_value
+             )
+             if opps_list_type != ListType.OPPORTUNITY:
+                 raise CLIError(
+                     f"--expand-opportunities-list must reference an opportunity list, "
+                     f"got {opps_list_type.name.lower()} list.",
+                     exit_code=2,
+                     error_type="usage_error",
+                 )
+             resolved_opps_list_id = ListId(int(resolved_opps_list.list.id))
+             resolved["expandOpportunitiesList"] = {
+                 "listId": int(resolved_opps_list_id),
+                 "listName": resolved_opps_list.list.name,
+             }
+
+         # Warn about expensive --expand opportunities without scoping (Phase 5)
+         if "opportunities" in expand_set and resolved_opps_list_id is None:
+             warnings.append(
+                 "Expanding opportunities without --expand-opportunities-list will search "
+                 "all opportunity lists. This may be slow for large workspaces. "
+                 "Consider using --expand-opportunities-list to scope the search."
+             )
+
+         # Resolve columns/fields.
+         field_meta = list_fields_for_list(client=client, list_id=list_id, cache=cache)
+         field_by_id: dict[str, FieldMetadata] = {str(f.id): f for f in field_meta}
+
+         selected_field_ids: list[str] = []
+         if saved_view:
+             _, view_resolved = resolve_saved_view(
+                 client=client, list_id=list_id, selector=saved_view, cache=cache
+             )
+             resolved.update(view_resolved)
+             # Note: API's view.field_ids is typically empty; use --field to specify fields
+             if fields:
+                 selected_field_ids = _resolve_field_selectors(
+                     fields=fields, field_by_id=field_by_id
+                 )
+             else:
+                 # No explicit fields requested with saved view - return all fields
+                 selected_field_ids = [str(f.id) for f in field_meta]
+         elif fields:
+             selected_field_ids = _resolve_field_selectors(fields=fields, field_by_id=field_by_id)
+         else:
+             selected_field_ids = [str(f.id) for f in field_meta]
+
+         columns = _columns_meta(selected_field_ids, field_by_id=field_by_id)
+
+         if dry_run:
+             if want_expand:
+                 # Cleaner output for --expand mode (omit irrelevant fields like cursor)
+                 data: dict[str, Any] = {
+                     "listId": int(list_id),
+                     "listName": resolved_list.list.name,
+                     "listType": list_type.name.lower(),
+                     "csv": csv_flag,
+                 }
+                 if filter_expr:
+                     data["filter"] = filter_expr
+             else:
+                 # Standard export - show all query params
+                 data = {
+                     "listId": int(list_id),
+                     "listName": resolved_list.list.name,
+                     "listType": list_type.name.lower(),
+                     "savedView": saved_view,
+                     "fieldIds": selected_field_ids,
+                     "filter": filter_expr,
+                     "pageSize": page_size,
+                     "cursor": cursor,
+                     "csv": csv_flag,
+                 }
+             if want_expand:
+                 # Estimate API calls for expansion
+                 entry_count = resolved_list.list.list_size or 0
+                 expand_calls = entry_count  # 1 call per entry (optimized for dual)
+                 data["expand"] = sorted(expand_set)
+                 data["expandMaxResults"] = effective_expand_limit
+                 data["csvMode"] = csv_mode if csv_flag else None
+                 # Add dry run warnings
+                 dry_run_warnings: list[str] = []
+                 # Handle unreliable listSize from API (often returns 0 for non-empty lists)
+                 if entry_count == 0:
+                     data["estimatedEntries"] = "unknown (API metadata unavailable)"
+                     data["estimatedApiCalls"] = "unknown"
+                     data["estimatedDuration"] = "unknown"
+                     dry_run_warnings.append(
+                         "Cannot estimate - Affinity API reports 0 entries but list may "
+                         "contain data. The export will fetch all available entries."
+                     )
+                 else:
+                     data["estimatedEntries"] = entry_count
+                     data["estimatedApiCalls"] = {
+                         "listEntries": max(1, entry_count // page_size),
+                         "associations": expand_calls,
+                         "total": max(1, entry_count // page_size) + expand_calls,
+                         "note": (
+                             "Using get_associations() optimization "
+                             "(both people+companies in 1 call per entry)"
+                             if "people" in expand_set and "companies" in expand_set
+                             else "1 call per entry"
+                         ),
+                     }
+                     # Estimate duration based on entry count
+                     if entry_count <= 50:
+                         data["estimatedDuration"] = "~30 seconds to 1 minute"
+                     elif entry_count <= 150:
+                         data["estimatedDuration"] = f"~2-5 minutes for {entry_count} entries"
+                     elif entry_count <= 500:
+                         data["estimatedDuration"] = f"~5-10 minutes for {entry_count} entries"
+                     else:
+                         data["estimatedDuration"] = f"~10-20+ minutes for {entry_count} entries"
+                     if entry_count > 1000:
+                         dry_run_warnings.append(
+                             f"Large export ({entry_count} entries) may take 10-15 minutes or more."
+                         )
+                 dry_run_warnings.append(
+                     "Expansion of related entities may be slower for large datasets."
+                 )
+                 if effective_expand_limit is not None:
+                     dry_run_warnings.append(
+                         f"Using --expand-max-results {effective_expand_limit} (default). "
+                         "Some entries may have more associations. "
+                         "Use --expand-all for complete data."
+                     )
+                 data["warnings"] = dry_run_warnings
+             return CommandOutput(
+                 data=data,
+                 context=cmd_context,
+                 resolved=resolved,
+                 columns=columns,
+                 api_called=True,
+             )
+
+         # Build expand field data structures from parsed_expand_fields
+         # - expand_field_ids: list of field IDs for API calls
+         # - field_id_to_display: dict mapping field ID (str) -> display name (original spec)
+         expand_field_ids: list[AnyFieldId] | None = None
+         field_id_to_display: dict[str, str] | None = None
+         if parsed_expand_fields:
+             expand_field_ids = [field_id for _, field_id in parsed_expand_fields]
+             field_id_to_display = {
+                 str(field_id): original for original, field_id in parsed_expand_fields
+             }
+
+         # Prepare CSV writing.
+         want_csv = csv_flag
+         rows_written = 0
+         next_cursor: str | None = None
+
+         # Helper to format progress description with association counts
+         def _format_progress_desc(
+             entries: int,
+             total: int | None,
+             people_count: int,
+             companies_count: int,
+             opportunities_count: int,
+             expand_set: set[str],
+         ) -> str:
+             if total and total > 0:
+                 pct = int(100 * entries / total)
+                 desc = f"Exporting: {entries}/{total} entries ({pct}%)"
+             else:
+                 desc = f"Exporting: {entries} entries"
+             if expand_set:
+                 parts = []
+                 if "people" in expand_set and people_count > 0:
+                     parts.append(f"{people_count} people")
+                 if "companies" in expand_set and companies_count > 0:
+                     parts.append(f"{companies_count} companies")
+                 if "opportunities" in expand_set and opportunities_count > 0:
+                     parts.append(f"{opportunities_count} opportunities")
+                 if parts:
+                     desc += ", " + " + ".join(parts)
+             return desc
+
+         def _format_filter_progress(state: dict[str, Any] | None) -> str | None:
+             """Format progress description for filtered queries.
+
+             Returns a description showing scanning context. The progress bar
+             separately shows the exported row count, so we don't duplicate that here.
+             """
+             if state is None:
+                 return None
+             filter_stats = state.get("filterStats")
+             if filter_stats is None:
+                 return None
+             scanned = filter_stats.get("scanned", 0)
+             matched = filter_stats.get("matched", 0)
+             return f"Exporting ({matched} matches from {scanned} scanned)"
+
+         with ExitStack() as stack:
+             progress: Progress | None = None
+             task_id: TaskID | None = None
+             show_progress = (
+                 ctx.progress != "never"
+                 and not ctx.quiet
+                 and (ctx.progress == "always" or sys.stderr.isatty())
+             )
+             entry_total = resolved_list.list.list_size if want_expand else None
+             if show_progress:
+                 progress = stack.enter_context(
+                     Progress(
+                         TextColumn("{task.description}"),
+                         BarColumn(),
+                         TextColumn("{task.completed} rows"),
+                         TimeElapsedColumn(),
+                         console=Console(file=sys.stderr),
+                         transient=True,
+                     )
+                 )
+                 initial_desc = (
+                     "Exporting"
+                     if not want_expand
+                     else _format_progress_desc(0, entry_total, 0, 0, 0, expand_set)
+                 )
+                 task_id = progress.add_task(
+                     initial_desc, total=max_results if max_results else None
+                 )
+
+             if want_csv:
+                 field_headers = [
+                     (
+                         (field_by_id[fid].name if fid in field_by_id else fid)
+                         if csv_header == "names"
+                         else fid
+                     )
+                     for fid in selected_field_ids
+                 ]
+                 base_header = [
+                     "listEntryId",
+                     "entityType",
+                     "entityId",
+                     "entityName",
+                     *field_headers,
+                 ]
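+                 # Fixed entry columns first, then one CSV column per selected field.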
+
+                 # Add expansion columns if needed
+                 if want_expand:
+                     header = _expand_csv_headers(
+                         base_header,
+                         expand_set,
+                         csv_mode,
+                         expand_fields=parsed_expand_fields,
+                         header_mode=csv_header,
+                     )
+                 else:
+                     header = base_header
+
+                 csv_iter_state: dict[str, Any] = {}
+                 entries_with_truncated_assoc: list[int] = []
+                 skipped_entries: list[int] = []
+                 entries_with_large_nested_assoc: list[int] = []
+                 csv_associations_fetched: dict[str, int] = {
+                     "people": 0,
+                     "companies": 0,
+                     "opportunities": 0,
+                 }
+                 csv_entries_processed = 0
+
+                 def iter_rows() -> Any:
+                     nonlocal rows_written, next_cursor, csv_entries_processed
+
+                     # Rate limiting for MCP progress (0.65s interval)
+                     last_mcp_progress_time: float = float("-inf")
+                     # MCP mode: emit JSON progress when not TTY but progress still desired
+                     # IMPORTANT: If Rich progress bar is active (show_progress=True),
+                     # don't also emit JSON progress - they're mutually exclusive
+                     mcp_mode = (
+                         not show_progress
+                         and not sys.stderr.isatty()
+                         and ctx.progress != "never"
+                         and not ctx.quiet
+                     )
+
+                     # Create callback for real-time filter progress updates
+                     def on_filter_progress(stats: FilterStats) -> None:
+                         nonlocal last_mcp_progress_time
+
+                         desc = f"Scanning {stats.scanned}... ({stats.matched} matches)"
+
+                         # Rich Progress bar (TTY)
+                         if progress is not None and task_id is not None:
+                             progress.update(task_id, description=desc)
+
+                         # NDJSON for MCP (non-TTY) with rate limiting
+                         if mcp_mode:
+                             now = time.monotonic()
+                             if now - last_mcp_progress_time >= 0.65:
+                                 last_mcp_progress_time = now
+                                 obj = {"type": "progress", "progress": None, "message": desc}
+                                 print(json.dumps(obj), file=sys.stderr, flush=True)
+
+                     # Use callback if we have a filter and either Rich progress or MCP mode
+                     has_rich_progress = progress is not None and task_id is not None
+                     filter_callback = (
+                         on_filter_progress
+                         if filter_expr and (mcp_mode or has_rich_progress)
+                         else None
+                     )
+
+                     for row, page_next_cursor in _iterate_list_entries(
+                         client=client,
+                         list_id=list_id,
+                         saved_view=saved_view,
+                         filter_expr=filter_expr,
+                         selected_field_ids=selected_field_ids,
+                         page_size=page_size,
+                         cursor=cursor,
+                         max_results=max_results,
+                         all_pages=all_pages,
+                         field_by_id=field_by_id,
+                         key_mode=csv_header,
+                         state=csv_iter_state,
+                         cache=cache,
+                         filter_progress_callback=filter_callback,
+                     ):
+                         next_cursor = page_next_cursor
+
+                         if not want_expand:
+                             # No expansion - yield row as-is
+                             rows_written += 1
+                             if progress is not None and task_id is not None:
+                                 filter_desc = _format_filter_progress(csv_iter_state)
+                                 if filter_desc:
+                                     progress.update(
+                                         task_id,
+                                         completed=rows_written,
+                                         description=filter_desc,
+                                     )
+                                 else:
+                                     progress.update(task_id, completed=rows_written)
+                             yield row
+                             continue
+
+                         # Handle expansion for opportunity lists
+                         entity_id = row.get("entityId")
+                         if entity_id is None:
+                             # No entity - emit row with empty expansion columns
+                             expanded_row = dict(row)
+                             expanded_row["expandedType"] = ""
+                             expanded_row["expandedId"] = ""
+                             expanded_row["expandedName"] = ""
+                             if "people" in expand_set:
+                                 expanded_row["expandedEmail"] = ""
+                             if "companies" in expand_set:
+                                 expanded_row["expandedDomain"] = ""
+                             rows_written += 1
+                             if progress is not None and task_id is not None:
+                                 progress.update(task_id, completed=rows_written)
+                             yield expanded_row
+                             continue
+
+                         # Fetch associations based on list type
+                         # For flat CSV mode, use prefixed field keys (person.X, company.X)
+                         # For nested CSV mode, use unprefixed keys in JSON arrays
+                         result = _fetch_associations(
+                             client=client,
+                             list_type=list_type,
+                             entity_id=entity_id,
+                             expand_set=expand_set,
+                             max_results=effective_expand_limit,
+                             on_error=expand_on_error,
+                             warnings=warnings,
+                             expand_field_types=parsed_expand_field_types,
+                             expand_field_ids=expand_field_ids,
+                             expand_filters=parsed_expand_filters,
+                             expand_opps_list_id=resolved_opps_list_id,
+                             field_id_to_display=field_id_to_display,
+                             prefix_fields=(csv_mode == "flat"),
+                         )
+
+                         if result is None:
+                             # Error occurred and on_error='skip'
+                             skipped_entries.append(entity_id)
+                             continue
+
+                         people, companies, opportunities = result
+                         csv_entries_processed += 1
+                         csv_associations_fetched["people"] += len(people)
+                         csv_associations_fetched["companies"] += len(companies)
+                         csv_associations_fetched["opportunities"] += len(opportunities)
+
+                         # Update progress description with association counts
+                         if progress is not None and task_id is not None:
+                             progress.update(
+                                 task_id,
+                                 description=_format_progress_desc(
+                                     csv_entries_processed,
+                                     entry_total,
+                                     csv_associations_fetched["people"],
+                                     csv_associations_fetched["companies"],
+                                     csv_associations_fetched["opportunities"],
+                                     expand_set,
+                                 ),
+                             )
+
+                         # Check for truncation
+                         if effective_expand_limit is not None and (
+                             len(people) >= effective_expand_limit
+                             or len(companies) >= effective_expand_limit
+                             or len(opportunities) >= effective_expand_limit
+                         ):
+                             entries_with_truncated_assoc.append(entity_id)
+
+                         # Handle CSV mode
+                         if csv_mode == "flat":
+                             # Flat mode: one row per association
+                             emitted_any = False
+
+                             # Emit person rows
+                             for person in people:
+                                 expanded_row = dict(row)
+                                 expanded_row["expandedType"] = "person"
+                                 expanded_row["expandedId"] = person["id"]
+                                 expanded_row["expandedName"] = person["name"]
+                                 if "people" in expand_set:
+                                     expanded_row["expandedEmail"] = person.get("primaryEmail") or ""
+                                 if "companies" in expand_set:
+                                     expanded_row["expandedDomain"] = ""
+                                 if "opportunities" in expand_set:
+                                     expanded_row["expandedListId"] = ""
+                                 # Copy prefixed field values (Phase 4)
+                                 for key, val in person.items():
+                                     if key.startswith("person."):
+                                         expanded_row[key] = val if val is not None else ""
+                                 rows_written += 1
+                                 emitted_any = True
+                                 if progress is not None and task_id is not None:
+                                     progress.update(task_id, completed=rows_written)
+                                 yield expanded_row
+
+                             # Emit company rows
+                             for company in companies:
+                                 expanded_row = dict(row)
+                                 expanded_row["expandedType"] = "company"
+                                 expanded_row["expandedId"] = company["id"]
+                                 expanded_row["expandedName"] = company["name"]
+                                 if "people" in expand_set:
+                                     expanded_row["expandedEmail"] = ""
+                                 if "companies" in expand_set:
+                                     expanded_row["expandedDomain"] = company.get("domain") or ""
+                                 if "opportunities" in expand_set:
+                                     expanded_row["expandedListId"] = ""
+                                 # Copy prefixed field values (Phase 4)
+                                 for key, val in company.items():
+                                     if key.startswith("company."):
+                                         expanded_row[key] = val if val is not None else ""
+                                 rows_written += 1
+                                 emitted_any = True
+                                 if progress is not None and task_id is not None:
+                                     progress.update(task_id, completed=rows_written)
+                                 yield expanded_row
+
+                             # Emit opportunity rows (Phase 5)
+                             for opp in opportunities:
+                                 expanded_row = dict(row)
+                                 expanded_row["expandedType"] = "opportunity"
+                                 expanded_row["expandedId"] = opp["id"]
+                                 expanded_row["expandedName"] = opp.get("name") or ""
+                                 if "people" in expand_set:
+                                     expanded_row["expandedEmail"] = ""
+                                 if "companies" in expand_set:
+                                     expanded_row["expandedDomain"] = ""
+                                 if "opportunities" in expand_set:
+                                     expanded_row["expandedListId"] = opp.get("listId") or ""
+                                 rows_written += 1
+                                 emitted_any = True
+                                 if progress is not None and task_id is not None:
+                                     progress.update(task_id, completed=rows_written)
+                                 yield expanded_row
+
+                             # If no associations, emit one row with empty expansion columns
+                             if not emitted_any:
+                                 expanded_row = dict(row)
+                                 expanded_row["expandedType"] = ""
+                                 expanded_row["expandedId"] = ""
+                                 expanded_row["expandedName"] = ""
+                                 if "people" in expand_set:
+                                     expanded_row["expandedEmail"] = ""
+                                 if "companies" in expand_set:
+                                     expanded_row["expandedDomain"] = ""
+                                 if "opportunities" in expand_set:
+                                     expanded_row["expandedListId"] = ""
+                                 rows_written += 1
+                                 if progress is not None and task_id is not None:
+                                     progress.update(task_id, completed=rows_written)
+                                 yield expanded_row
+
+                         else:
+                             # Nested mode: JSON arrays in columns
+                             total_assoc = len(people) + len(companies) + len(opportunities)
+                             if total_assoc > 100:
+                                 entries_with_large_nested_assoc.append(entity_id)
+                             expanded_row = dict(row)
+                             if "people" in expand_set:
+                                 people_json = json.dumps(people) if people else "[]"
+                                 expanded_row["_expand_people"] = people_json
+                             if "companies" in expand_set:
+                                 companies_json = json.dumps(companies) if companies else "[]"
+                                 expanded_row["_expand_companies"] = companies_json
+                             if "opportunities" in expand_set:
+                                 opps_json = json.dumps(opportunities) if opportunities else "[]"
+                                 expanded_row["_expand_opportunities"] = opps_json
+                             rows_written += 1
+                             if progress is not None and task_id is not None:
+                                 progress.update(task_id, completed=rows_written)
+                             yield expanded_row
+
+                 # Write CSV to stdout
+                 try:
+                     write_csv_to_stdout(
+                         rows=iter_rows(),
+                         fieldnames=header,
+                         bom=csv_bom,
+                     )
+                 except KeyboardInterrupt:
+                     # Partial output already sent to stdout
+                     Console(file=sys.stderr).print(f"\nInterrupted ({rows_written} rows written)")
+                     sys.exit(130)
+
+                 # Print warnings to stderr before exit
+                 if entries_with_truncated_assoc:
+                     count = len(entries_with_truncated_assoc)
+                     Console(file=sys.stderr).print(
+                         f"Warning: {count} entries had associations truncated at "
+                         f"{effective_expand_limit} (use --expand-all for complete data)"
+                     )
+
+                 if entries_with_large_nested_assoc and csv_mode == "nested":
+                     count = len(entries_with_large_nested_assoc)
+                     first_id = entries_with_large_nested_assoc[0]
+                     Console(file=sys.stderr).print(
+                         f"Warning: {count} entries have >100 associations. "
+                         f"Large nested arrays may impact memory (e.g., entry {first_id}). "
+                         "Consider --csv-mode flat."
+                     )
+
+                 if skipped_entries:
+                     if len(skipped_entries) <= 10:
+                         ids_str = ", ".join(str(eid) for eid in skipped_entries)
+                         Console(file=sys.stderr).print(
+                             f"Warning: {len(skipped_entries)} entries skipped due to errors: "
+                             f"{ids_str} (use --expand-on-error raise to fail on errors)"
+                         )
+                     else:
+                         first_ids = ", ".join(str(eid) for eid in skipped_entries[:5])
+                         Console(file=sys.stderr).print(
+                             f"Warning: {len(skipped_entries)} entries skipped due to errors "
+                             f"(first 5: {first_ids}, ...) "
+                             "(use --expand-on-error raise to fail on errors)"
+                         )
+
+                 if csv_iter_state.get("truncatedMidPage") is True:
+                     Console(file=sys.stderr).print(
+                         "Warning: Results limited by --max-results. Use --all to fetch all results."
+                     )
+
+                 # Print export summary to stderr
+                 if show_progress:
+                     elapsed = time.time() - export_start_time
+                     filter_stats = csv_iter_state.get("filterStats")
+                     if filter_stats:
+                         scanned = filter_stats.get("scanned", 0)
+                         Console(file=sys.stderr).print(
+                             f"Exported {rows_written:,} rows "
+                             f"(filtered from {scanned:,} scanned) "
+                             f"in {format_duration(elapsed)}"
+                         )
+                     else:
+                         Console(file=sys.stderr).print(
+                             f"Exported {rows_written:,} rows in {format_duration(elapsed)}"
+                         )
+
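+                 # CSV was streamed to stdout above; exit before the in-memory
+                 # JSON/table path that follows.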
1285
+ sys.exit(0)
1286
+
1287
+ # JSON/table rows in-memory (small exports).
1288
+ # Emit memory warning for large JSON exports with expansion
1289
+ if want_expand:
1290
+ entry_count = resolved_list.list.list_size or 0
1291
+ # Rough estimate: each entry with associations is ~1KB
1292
+ estimated_rows = entry_count
1293
+ if estimated_rows > 1000:
1294
+ warnings.append(
1295
+ f"JSON output will buffer ~{estimated_rows} rows in memory. "
1296
+ "For large exports, consider --csv for streaming output."
1297
+ )
1298
+
1299
+ rows: list[dict[str, Any]] = []
1300
+ table_iter_state: dict[str, Any] = {}
1301
+ json_entries_with_truncated_assoc: list[int] = []
1302
+ json_skipped_entries: list[int] = []
1303
+ associations_fetched: dict[str, int] = {
1304
+ "people": 0,
1305
+ "companies": 0,
1306
+ "opportunities": 0,
1307
+ }
1308
+
1309
+ # Rate limiting for MCP progress (0.65s interval)
1310
+ json_last_mcp_progress_time: float = float("-inf")
1311
+ # MCP mode: emit JSON progress when not TTY but progress still desired
1312
+ # IMPORTANT: If Rich progress bar is active (show_progress=True),
1313
+ # don't also emit JSON progress - they're mutually exclusive
1314
+ json_mcp_mode = (
1315
+ not show_progress
1316
+ and not sys.stderr.isatty()
1317
+ and ctx.progress != "never"
1318
+ and not ctx.quiet
1319
+ )
1320
+
1321
+ # Create callback for real-time filter progress updates (JSON output)
1322
+ def on_json_filter_progress(stats: FilterStats) -> None:
1323
+ nonlocal json_last_mcp_progress_time
1324
+
1325
+ desc = f"Scanning {stats.scanned}... ({stats.matched} matches)"
1326
+
1327
+ # Rich Progress bar (TTY)
1328
+ if progress is not None and task_id is not None:
1329
+ progress.update(task_id, description=desc)
1330
+
1331
+ # NDJSON for MCP (non-TTY) with rate limiting
1332
+ if json_mcp_mode:
1333
+ now = time.monotonic()
1334
+ if now - json_last_mcp_progress_time >= 0.65:
1335
+ json_last_mcp_progress_time = now
1336
+ print(
1337
+ json.dumps({"type": "progress", "progress": None, "message": desc}),
1338
+ file=sys.stderr,
1339
+ flush=True,
1340
+ )
1341
+
1342
+ # Use callback if we have a filter and either Rich progress or MCP mode
1343
+ json_filter_callback = (
1344
+ on_json_filter_progress
1345
+ if filter_expr and (json_mcp_mode or (progress is not None and task_id is not None))
1346
+ else None
1347
+ )
1348
+
1349
+ for row, page_next_cursor in _iterate_list_entries(
1350
+ client=client,
1351
+ list_id=list_id,
1352
+ saved_view=saved_view,
1353
+ filter_expr=filter_expr,
1354
+ selected_field_ids=selected_field_ids,
1355
+ page_size=page_size,
1356
+ cursor=cursor,
1357
+ max_results=max_results,
1358
+ all_pages=all_pages,
1359
+ field_by_id=field_by_id,
1360
+ key_mode="names",
1361
+ state=table_iter_state,
1362
+ cache=cache,
1363
+ filter_progress_callback=json_filter_callback,
1364
+ ):
1365
+ next_cursor = page_next_cursor
1366
+
1367
+ if not want_expand:
1368
+ rows.append(row)
1369
+ if progress is not None and task_id is not None:
1370
+ filter_desc = _format_filter_progress(table_iter_state)
1371
+ if filter_desc:
1372
+ progress.update(task_id, completed=len(rows), description=filter_desc)
1373
+ else:
1374
+ progress.update(task_id, completed=len(rows))
1375
+ continue
1376
+
1377
+ # Handle expansion for JSON output (nested arrays)
1378
+ entity_id = row.get("entityId")
1379
+ if entity_id is None:
1380
+ # No entity - add row with empty arrays
1381
+ expanded_row = dict(row)
1382
+ if "people" in expand_set:
1383
+ expanded_row["people"] = []
1384
+ if "companies" in expand_set:
1385
+ expanded_row["companies"] = []
1386
+ if "opportunities" in expand_set:
1387
+ expanded_row["opportunities"] = []
1388
+ expanded_row["associations"] = "—"
1389
+ rows.append(expanded_row)
1390
+ if progress is not None and task_id is not None:
1391
+ progress.update(task_id, completed=len(rows))
1392
+ continue
1393
+
1394
+ # Fetch associations based on list type
1395
+ # For JSON output, use unprefixed field keys in nested arrays
1396
+ result = _fetch_associations(
1397
+ client=client,
1398
+ list_type=list_type,
1399
+ entity_id=entity_id,
1400
+ expand_set=expand_set,
1401
+                     max_results=effective_expand_limit,
+                     on_error=expand_on_error,
+                     warnings=warnings,
+                     expand_field_types=parsed_expand_field_types,
+                     expand_field_ids=expand_field_ids,
+                     expand_filters=parsed_expand_filters,
+                     expand_opps_list_id=resolved_opps_list_id,
+                     field_id_to_display=field_id_to_display,
+                     prefix_fields=False,
+                 )
+
+                 if result is None:
+                     # Error occurred and on_error='skip' - skip this entry entirely
+                     json_skipped_entries.append(entity_id)
+                     continue
+
+                 people, companies, opportunities = result
+
+                 # Check for truncation
+                 if effective_expand_limit is not None and (
+                     len(people) >= effective_expand_limit
+                     or len(companies) >= effective_expand_limit
+                     or len(opportunities) >= effective_expand_limit
+                 ):
+                     json_entries_with_truncated_assoc.append(entity_id)
+
+                 # Track counts
+                 associations_fetched["people"] += len(people)
+                 associations_fetched["companies"] += len(companies)
+                 associations_fetched["opportunities"] += len(opportunities)
+
+                 # Update progress description with association counts
+                 if progress is not None and task_id is not None:
+                     progress.update(
+                         task_id,
+                         description=_format_progress_desc(
+                             len(rows) + 1,  # +1 for current entry being processed
+                             entry_total,
+                             associations_fetched["people"],
+                             associations_fetched["companies"],
+                             associations_fetched["opportunities"],
+                             expand_set,
+                         ),
+                     )
+
+                 # Add nested arrays to row
+                 expanded_row = dict(row)
+                 if "people" in expand_set:
+                     expanded_row["people"] = people
+                 if "companies" in expand_set:
+                     expanded_row["companies"] = companies
+                 if "opportunities" in expand_set:
+                     expanded_row["opportunities"] = opportunities
+
+                 # Add associations summary for table mode
+                 summary_parts = []
+                 if "people" in expand_set:
+                     pc = len(people)
+                     if pc > 0:
+                         if pc >= 100:
+                             label = "+ people"
+                         elif pc == 1:
+                             label = " person"
+                         else:
+                             label = " people"
+                         summary_parts.append(f"{pc}{label}")
+                 if "companies" in expand_set:
+                     cc = len(companies)
+                     if cc > 0:
+                         if cc >= 100:
+                             label = "+ companies"
+                         elif cc == 1:
+                             label = " company"
+                         else:
+                             label = " companies"
+                         summary_parts.append(f"{cc}{label}")
+                 if "opportunities" in expand_set:
+                     oc = len(opportunities)
+                     if oc > 0:
+                         if oc >= 100:
+                             label = "+ opps"
+                         elif oc == 1:
+                             label = " opp"
+                         else:
+                             label = " opps"
+                         summary_parts.append(f"{oc}{label}")
+                 assoc_summary = ", ".join(summary_parts) if summary_parts else "—"
+                 expanded_row["associations"] = assoc_summary
+
+                 rows.append(expanded_row)
+                 if progress is not None and task_id is not None:
+                     progress.update(task_id, completed=len(rows))
+
+         if table_iter_state.get("truncatedMidPage") is True:
+             warnings.append("Results limited by --max-results. Use --all to fetch all results.")
+
+         # Add truncation warning for JSON output
+         if json_entries_with_truncated_assoc:
+             count = len(json_entries_with_truncated_assoc)
+             warnings.append(
+                 f"{count} entries had associations truncated at {effective_expand_limit} "
+                 "(use --expand-all for complete data)"
+             )
+
+         # Add skipped entries summary for JSON output with IDs
+         if json_skipped_entries:
+             if len(json_skipped_entries) <= 10:
+                 ids_str = ", ".join(str(eid) for eid in json_skipped_entries)
+                 warnings.append(
+                     f"{len(json_skipped_entries)} entries skipped due to errors: {ids_str} "
+                     "(use --expand-on-error raise to fail on errors)"
+                 )
+             else:
+                 first_ids = ", ".join(str(eid) for eid in json_skipped_entries[:5])
+                 warnings.append(
+                     f"{len(json_skipped_entries)} entries skipped due to errors "
+                     f"(first 5: {first_ids}, ...) "
+                     "(use --expand-on-error raise to fail on errors)"
+                 )
+
+         # Build output data
+         output_data: dict[str, Any] = {"rows": rows}
+         if want_expand:
+             output_data["entriesProcessed"] = len(rows) + len(json_skipped_entries)
+             output_data["associationsFetched"] = {
+                 k: v for k, v in associations_fetched.items() if k in expand_set
+             }
+
+         # Print export summary to stderr
+         if show_progress:
+             elapsed = time.time() - export_start_time
+             filter_stats = table_iter_state.get("filterStats")
+             if filter_stats:
+                 scanned = filter_stats.get("scanned", 0)
+                 Console(file=sys.stderr).print(
+                     f"Exported {len(rows):,} rows "
+                     f"(filtered from {scanned:,} scanned) "
+                     f"in {format_duration(elapsed)}"
+                 )
+             else:
+                 Console(file=sys.stderr).print(
+                     f"Exported {len(rows):,} rows in {format_duration(elapsed)}"
+                 )
+
+         return CommandOutput(
+             data=output_data,
+             context=cmd_context,
+             pagination={"rows": {"nextCursor": next_cursor, "prevCursor": None}}
+             if next_cursor
+             else None,
+             resolved=resolved,
+             columns=columns,
+             api_called=True,
+         )
+         raise AssertionError("unreachable")
+
+     run_command(ctx, command="list export", fn=fn)
+
+
+ def _resolve_field_selectors(
+     *,
+     fields: tuple[str, ...],
+     field_by_id: dict[str, FieldMetadata],
+ ) -> list[str]:
+     resolved: list[str] = []
+     # Build name index for list-scoped fields
+     by_name: dict[str, list[str]] = {}
+     for fid, meta in field_by_id.items():
+         by_name.setdefault(meta.name.lower(), []).append(fid)
+
+     for raw in fields:
+         raw = raw.strip()
+         if not raw:
+             continue
+         if raw.isdigit():
+             resolved.append(raw)
+             continue
+         # treat as ID if exact key exists
+         if raw in field_by_id:
+             resolved.append(raw)
+             continue
+         matches = by_name.get(raw.lower(), [])
+         if not matches:
+             raise CLIError(f'Unknown field: "{raw}"', exit_code=2, error_type="usage_error")
+         if len(matches) > 1:
+             raise CLIError(
+                 f'Ambiguous field name: "{raw}"',
+                 exit_code=2,
+                 error_type="ambiguous_resolution",
+                 details={"name": raw, "fieldIds": matches},
+             )
+         resolved.append(matches[0])
+     return resolved
+
+
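A minimal sketch of how `_resolve_field_selectors` behaves, with a hypothetical stand-in for `FieldMetadata` (only a `.name` attribute is assumed): numeric strings and exact IDs pass through, names resolve case-insensitively, and a name shared by two fields raises.

    from types import SimpleNamespace

    field_by_id = {
        "field-1": SimpleNamespace(name="Stage"),
        "field-2": SimpleNamespace(name="Owner"),
        "field-3": SimpleNamespace(name="owner"),  # same name, different case
    }
    _resolve_field_selectors(fields=("123", "field-1", "Stage"), field_by_id=field_by_id)
    # -> ["123", "field-1", "field-1"]
    # fields=("Owner",) would raise CLIError('Ambiguous field name: "Owner"'),
    # since "field-2" and "field-3" both lowercase to "owner".
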
+ def _columns_meta(
+     field_ids: list[str],
+     *,
+     field_by_id: dict[str, FieldMetadata],
+ ) -> list[dict[str, Any]]:
+     cols: list[dict[str, Any]] = []
+     for fid in field_ids:
+         meta = field_by_id.get(fid)
+         cols.append(
+             {
+                 "fieldId": fid,
+                 "fieldName": meta.name if meta else fid,
+                 "fieldType": meta.type if meta else None,
+                 "valueType": meta.value_type if meta else None,
+             }
+         )
+     return cols
+
+
+ def _iterate_list_entries(
+     *,
+     client: Any,
+     list_id: ListId,
+     saved_view: str | None,
+     filter_expr: str | None,
+     selected_field_ids: list[str],
+     page_size: int,
+     cursor: str | None,
+     max_results: int | None,
+     all_pages: bool,
+     field_by_id: dict[str, FieldMetadata],
+     key_mode: Literal["names", "ids"],
+     state: dict[str, Any] | None = None,
+     cache: Any = None,
+     filter_progress_callback: Callable[[FilterStats], None] | None = None,
+ ) -> Any:
+     """
+     Yield `(row_dict, next_cursor)` pairs, where `next_cursor` resumes at the next page (not per-row).
+
+     Args:
+         filter_progress_callback: Optional callback invoked after each physical page
+             fetch during filtered queries. Useful for real-time progress updates
+             while scanning many rows with few matches.
+     """
+     # Suppress the SDK's client-side filtering warning (the CLI emits this warning itself)
+     stdlib_warnings.filterwarnings(
+         "ignore",
+         message=".*does not support server-side filtering.*",
+         category=UserWarning,
+     )
+
+     fetched = 0
+
+     entries = client.lists.entries(list_id)
+
+     if saved_view:
+         next_page_cursor: str | None = None
+         if cursor:
+             page = entries.list(cursor=cursor)
+         else:
+             view, _ = resolve_saved_view(
+                 client=client, list_id=list_id, selector=saved_view, cache=cache
+             )
+             page = entries.from_saved_view(view.id, field_ids=selected_field_ids, limit=page_size)
+
+         next_page_cursor = page.pagination.next_cursor
+         for idx, entry in enumerate(page.data):
+             fetched += 1
+             yield (
+                 _entry_to_row(entry, selected_field_ids, field_by_id, key_mode=key_mode),
+                 None
+                 if max_results is not None and fetched >= max_results and idx < (len(page.data) - 1)
+                 else next_page_cursor,
+             )
+             if max_results is not None and fetched >= max_results:
+                 if idx < (len(page.data) - 1) and state is not None:
+                     state["truncatedMidPage"] = True
+                 return
+
+         if not all_pages and max_results is None:
+             return
+
+         while next_page_cursor:
+             page = entries.list(cursor=next_page_cursor)
+             next_page_cursor = page.pagination.next_cursor
+             for idx, entry in enumerate(page.data):
+                 fetched += 1
+                 yield (
+                     _entry_to_row(entry, selected_field_ids, field_by_id, key_mode=key_mode),
+                     None
+                     if max_results is not None
+                     and fetched >= max_results
+                     and idx < (len(page.data) - 1)
+                     else next_page_cursor,
+                 )
+                 if max_results is not None and fetched >= max_results:
+                     if idx < (len(page.data) - 1) and state is not None:
+                         state["truncatedMidPage"] = True
+                     return
+         return
+
+     pages = (
+         entries.pages(cursor=cursor)
+         if cursor is not None
+         else entries.pages(
+             field_ids=selected_field_ids,
+             filter=filter_expr,
+             limit=page_size,
+             progress_callback=filter_progress_callback,
+         )
+     )
+
+     first_page = True
+     for page in pages:
+         next_page_cursor = page.pagination.next_cursor
+         # Track filter stats for progress reporting
+         if state is not None and page.filter_stats is not None:
+             state["filterStats"] = {
+                 "scanned": page.filter_stats.scanned,
+                 "matched": page.filter_stats.matched,
+             }
+         for idx, entry in enumerate(page.data):
+             fetched += 1
+             yield (
+                 _entry_to_row(entry, selected_field_ids, field_by_id, key_mode=key_mode),
+                 None
+                 if max_results is not None and fetched >= max_results and idx < (len(page.data) - 1)
+                 else next_page_cursor,
+             )
+             if max_results is not None and fetched >= max_results:
+                 if idx < (len(page.data) - 1) and state is not None:
+                     state["truncatedMidPage"] = True
+                 return
+
+         if first_page and not all_pages and max_results is None:
+             return
+         first_page = False
+
+
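A sketch of how a caller might drain this generator while honoring the per-row cursor contract; `client`, `list_id`, and `meta` are assumed to exist, and the cursor paired with the last consumed row is what would surface as `nextCursor`:

    rows: list[dict[str, Any]] = []
    next_cursor: str | None = None
    for row, cursor in _iterate_list_entries(
        client=client, list_id=list_id, saved_view=None, filter_expr=None,
        selected_field_ids=["field-1"], page_size=100, cursor=None,
        max_results=250, all_pages=False, field_by_id=meta, key_mode="names",
    ):
        rows.append(row)
        next_cursor = cursor  # None when truncated mid-page, else resumes at the next page
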
+ def _extract_field_values(obj: Any) -> dict[str, Any]:
+     """Extract field values from an object with fields_raw (V2 API) or fields.data (fallback).
+
+     The V2 API returns fields as an array: [{"id": "field-X", "value": {"data": ...}}, ...]
+     This helper parses that format into a dict mapping field_id -> value.
+
+     Args:
+         obj: An object with `fields_raw` (list) and/or `fields.data` (dict) attributes
+
+     Returns:
+         Dict mapping field_id (str) -> field value
+     """
+     field_values: dict[str, Any] = {}
+     fields_raw = getattr(obj, "fields_raw", None)
+     if isinstance(fields_raw, list):
+         for field_obj in fields_raw:
+             if isinstance(field_obj, dict) and "id" in field_obj:
+                 fid_key = str(field_obj["id"])
+                 value_wrapper = field_obj.get("value")
+                 if isinstance(value_wrapper, dict):
+                     field_values[fid_key] = value_wrapper.get("data")
+                 else:
+                     field_values[fid_key] = value_wrapper
+     else:
+         # Fall back to fields.data for older API formats
+         fields_attr = getattr(obj, "fields", None)
+         if fields_attr is not None and hasattr(fields_attr, "data") and fields_attr.data:
+             field_values = dict(fields_attr.data)
+     return field_values
+
+
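A self-contained sketch of the V2 parsing path, with `SimpleNamespace` standing in for an SDK model object:

    from types import SimpleNamespace

    obj = SimpleNamespace(
        fields_raw=[
            {"id": "field-1", "value": {"data": "Active"}},  # dict wrapper -> unwrapped
            {"id": "field-2", "value": None},                # non-dict -> stored as-is
        ]
    )
    assert _extract_field_values(obj) == {"field-1": "Active", "field-2": None}
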
+ def _entry_to_row(
+     entry: ListEntryWithEntity,
+     field_ids: list[str],
+     field_by_id: dict[str, FieldMetadata],
+     *,
+     key_mode: Literal["names", "ids"],
+ ) -> dict[str, Any]:
+     entity_id: int | None = None
+     entity_name: str | None = None
+     if entry.entity is not None:
+         entity_id = int(entry.entity.id)
+         entity_name = getattr(entry.entity, "name", None)
+         if entity_name is None and hasattr(entry.entity, "full_name"):
+             entity_name = cast(Any, entry.entity).full_name
+     row: dict[str, Any] = {
+         "listEntryId": int(entry.id),
+         "entityType": entry.type,
+         "entityId": entity_id,
+         "entityName": entity_name,
+     }
+
+     # Extract field values from entity (V2 API stores fields on entity, not entry)
+     field_values = _extract_field_values(entry.entity) if entry.entity else {}
+
+     for fid in field_ids:
+         if key_mode == "ids":
+             key = fid
+         else:
+             key = field_by_id[fid].name if fid in field_by_id else fid
+         row[key] = field_values.get(str(fid))
+     return row
+
+
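The `key_mode` switch only changes the field-value keys, never the four core columns; a duck-typed sketch (the real argument is a `ListEntryWithEntity`, so the stand-ins here are purely illustrative):

    from types import SimpleNamespace

    entity = SimpleNamespace(
        id=7, name="Acme", fields_raw=[{"id": "field-1", "value": {"data": "Active"}}]
    )
    entry = SimpleNamespace(id=1, type="company", entity=entity)
    _entry_to_row(entry, ["field-1"], {"field-1": SimpleNamespace(name="Stage")}, key_mode="names")
    # -> {"listEntryId": 1, "entityType": "company", "entityId": 7,
    #     "entityName": "Acme", "Stage": "Active"}
    # With key_mode="ids" the last key would be "field-1" instead of "Stage".
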
+ def _person_to_expand_dict(
+     person: Any,
+     field_types: list[FieldType] | None = None,
+     field_ids: list[AnyFieldId] | None = None,
+     field_id_to_display: dict[str, str] | None = None,
+     prefix_fields: bool = True,
+ ) -> dict[str, Any]:
+     """Convert a Person object to an expand dict, including field values if present.
+
+     Args:
+         field_id_to_display: Mapping from field ID to display name for --expand-fields
+         prefix_fields: If True, prefix field keys with "person." (for flat CSV mode).
+             If False, use unprefixed display names (for nested JSON mode).
+     """
+     result: dict[str, Any] = {
+         "id": int(person.id),
+         "name": person.full_name,
+         "primaryEmail": person.primary_email or (person.emails[0] if person.emails else None),
+     }
+     # Include field values if requested and present
+     if (field_types or field_ids) and hasattr(person, "fields") and person.fields.requested:
+         field_values = _extract_field_values(person)
+         for field_id, value in field_values.items():
+             # Get the display name from the mapping, falling back to the field ID
+             display_name = (
+                 field_id_to_display.get(str(field_id), str(field_id))
+                 if field_id_to_display
+                 else str(field_id)
+             )
+             if prefix_fields:
+                 result[f"person.{display_name}"] = value
+             else:
+                 result[display_name] = value
+     return result
+
+
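The `prefix_fields` flag is what separates flat-CSV keys from nested-JSON keys. A sketch with a hypothetical, duck-typed person object (the real one is an SDK `Person`; `fields.requested` is assumed truthy when field data was requested):

    from types import SimpleNamespace

    person = SimpleNamespace(
        id=42, full_name="Ada Lovelace",
        primary_email="ada@example.com", emails=["ada@example.com"],
        fields=SimpleNamespace(requested=True),
        fields_raw=[{"id": "field-9", "value": {"data": "Investor"}}],
    )
    mapping = {"field-9": "Role"}
    _person_to_expand_dict(person, field_ids=["field-9"], field_id_to_display=mapping)
    # -> includes "person.Role": "Investor"   (prefix_fields=True, flat CSV)
    _person_to_expand_dict(
        person, field_ids=["field-9"], field_id_to_display=mapping, prefix_fields=False
    )
    # -> includes "Role": "Investor"          (nested JSON)
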
+ def _company_to_expand_dict(
+     company: Any,
+     field_types: list[FieldType] | None = None,
+     field_ids: list[AnyFieldId] | None = None,
+     field_id_to_display: dict[str, str] | None = None,
+     prefix_fields: bool = True,
+ ) -> dict[str, Any]:
+     """Convert a Company object to an expand dict, including field values if present.
+
+     Args:
+         field_id_to_display: Mapping from field ID to display name for --expand-fields
+         prefix_fields: If True, prefix field keys with "company." (for flat CSV mode).
+             If False, use unprefixed display names (for nested JSON mode).
+     """
+     result: dict[str, Any] = {
+         "id": int(company.id),
+         "name": company.name,
+         "domain": company.domain,
+     }
+     # Include field values if requested and present
+     if (field_types or field_ids) and hasattr(company, "fields") and company.fields.requested:
+         field_values = _extract_field_values(company)
+         for field_id, value in field_values.items():
+             # Get the display name from the mapping, falling back to the field ID
+             display_name = (
+                 field_id_to_display.get(str(field_id), str(field_id))
+                 if field_id_to_display
+                 else str(field_id)
+             )
+             if prefix_fields:
+                 result[f"company.{display_name}"] = value
+             else:
+                 result[display_name] = value
+     return result
+
+
+ def _fetch_opportunity_associations(
+     client: Any,
+     opportunity_id: OpportunityId,
+     *,
+     expand_set: set[str],
+     max_results: int | None,
+     on_error: str,
+     warnings: list[str],
+     expand_field_types: list[FieldType] | None = None,
+     expand_field_ids: list[AnyFieldId] | None = None,
+     field_id_to_display: dict[str, str] | None = None,
+     prefix_fields: bool = True,
+ ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]] | None:
+     """
+     Fetch people and/or companies associated with an opportunity.
+
+     Returns:
+         Tuple of (people_list, companies_list) where each list contains dicts with
+         id, name, primaryEmail/domain, plus field values if expand_field_types/ids are specified.
+         Returns None if an error occurred and on_error='skip'.
+     """
+     want_people = "people" in expand_set
+     want_companies = "companies" in expand_set
+     want_fields = bool(expand_field_types or expand_field_ids)
+
+     people: list[dict[str, Any]] = []
+     companies: list[dict[str, Any]] = []
+
+     try:
+         # Use the dual-fetch optimization if both are requested
+         if want_people and want_companies:
+             assoc = client.opportunities.get_associations(opportunity_id)
+             person_ids = [int(pid) for pid in assoc.person_ids]
+             company_ids = [int(cid) for cid in assoc.company_ids]
+         else:
+             person_ids = []
+             company_ids = []
+             if want_people:
+                 person_ids = [
+                     int(pid)
+                     for pid in client.opportunities.get_associated_person_ids(opportunity_id)
+                 ]
+             if want_companies:
+                 company_ids = [
+                     int(cid)
+                     for cid in client.opportunities.get_associated_company_ids(opportunity_id)
+                 ]
+
+         # Apply the max_results limit to IDs before fetching details
+         if max_results is not None and max_results >= 0:
+             person_ids = person_ids[:max_results]
+             company_ids = company_ids[:max_results]
+
+         # Fetch people details
+         if want_people and person_ids:
+             if want_fields:
+                 # Use the V2 API with field types to get field values
+                 for pid in person_ids:
+                     person = client.persons.get(
+                         PersonId(pid),
+                         field_types=expand_field_types,
+                         field_ids=expand_field_ids,
+                     )
+                     people.append(
+                         _person_to_expand_dict(
+                             person,
+                             expand_field_types,
+                             expand_field_ids,
+                             field_id_to_display,
+                             prefix_fields,
+                         )
+                     )
+             else:
+                 # Use the existing V1 method for core fields only
+                 fetched_people = client.opportunities.get_associated_people(
+                     opportunity_id, max_results=max_results
+                 )
+                 people = [_person_to_expand_dict(p) for p in fetched_people]
+
+         # Fetch company details
+         if want_companies and company_ids:
+             if want_fields:
+                 # Use the V2 API with field types to get field values
+                 for cid in company_ids:
+                     company = client.companies.get(
+                         CompanyId(cid),
+                         field_types=expand_field_types,
+                         field_ids=expand_field_ids,
+                     )
+                     companies.append(
+                         _company_to_expand_dict(
+                             company,
+                             expand_field_types,
+                             expand_field_ids,
+                             field_id_to_display,
+                             prefix_fields,
+                         )
+                     )
+             else:
+                 # Use the existing V1 method for core fields only
+                 fetched_companies = client.opportunities.get_associated_companies(
+                     opportunity_id, max_results=max_results
+                 )
+                 companies = [_company_to_expand_dict(c) for c in fetched_companies]
+
+     except Exception as e:
+         if on_error == "skip":
+             warnings.append(f"Skipped expansion for opportunity {int(opportunity_id)}: {e}")
+             return None
+         raise
+
+     return people, companies
+
+
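The on_error contract is worth pinning down: with on_error='skip', any exception is converted into a warning plus a None return, which the caller treats as "drop this entry". A sketch with a deliberately failing stand-in client (hypothetical classes, illustration only):

    class _FailingOpps:
        def get_associations(self, _id):
            raise RuntimeError("boom")

    class _FakeClient:
        opportunities = _FailingOpps()

    warnings: list[str] = []
    result = _fetch_opportunity_associations(
        _FakeClient(), OpportunityId(1),
        expand_set={"people", "companies"}, max_results=None,
        on_error="skip", warnings=warnings,
    )
    assert result is None and "Skipped expansion" in warnings[0]

With on_error set to anything else, the same exception would propagate via the bare `raise`.
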
+ def _fetch_company_associations(
+     client: Any,
+     company_id: CompanyId,
+     *,
+     expand_set: set[str],
+     max_results: int | None,
+     on_error: str,
+     warnings: list[str],
+     expand_field_types: list[FieldType] | None = None,
+     expand_field_ids: list[AnyFieldId] | None = None,
+     field_id_to_display: dict[str, str] | None = None,
+     prefix_fields: bool = True,
+ ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]] | None:
+     """
+     Fetch people associated with a company.
+
+     For company lists, only 'people' expansion is valid.
+
+     Returns:
+         Tuple of (people_list, []) where people_list contains dicts with
+         id, name, primaryEmail, plus field values if expand_field_types/ids are specified.
+         Returns None if an error occurred and on_error='skip'.
+     """
+     want_people = "people" in expand_set
+     want_fields = bool(expand_field_types or expand_field_ids)
+
+     people: list[dict[str, Any]] = []
+
+     try:
+         if want_people:
+             # Get person IDs first
+             person_ids = client.companies.get_associated_person_ids(
+                 company_id, max_results=max_results
+             )
+
+             if want_fields:
+                 # Use the V2 API with field types to get field values
+                 for pid in person_ids:
+                     person = client.persons.get(
+                         pid,
+                         field_types=expand_field_types,
+                         field_ids=expand_field_ids,
+                     )
+                     people.append(
+                         _person_to_expand_dict(
+                             person,
+                             expand_field_types,
+                             expand_field_ids,
+                             field_id_to_display,
+                             prefix_fields,
+                         )
+                     )
+             else:
+                 # Use the existing V1 method for core fields only
+                 fetched_people = client.companies.get_associated_people(
+                     company_id, max_results=max_results
+                 )
+                 people = [_person_to_expand_dict(p) for p in fetched_people]
+
+     except Exception as e:
+         if on_error == "skip":
+             warnings.append(f"Skipped expansion for company {int(company_id)}: {e}")
+             return None
+         raise
+
+     # Return (people, []) - companies is always empty for company list expansion
+     return people, []
+
+
+ def _fetch_person_associations(
+     client: Any,
+     person_id: PersonId,
+     *,
+     expand_set: set[str],
+     max_results: int | None,
+     on_error: str,
+     warnings: list[str],
+     expand_field_types: list[FieldType] | None = None,
+     expand_field_ids: list[AnyFieldId] | None = None,
+     field_id_to_display: dict[str, str] | None = None,
+     prefix_fields: bool = True,
+ ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]] | None:
+     """
+     Fetch companies associated with a person.
+
+     For person lists, only 'companies' expansion is valid.
+     Note: the V2 API doesn't return company_ids, so a V1 fallback is used to get the IDs.
+
+     Returns:
+         Tuple of ([], companies_list) where companies_list contains dicts with
+         id, name, domain, plus field values if expand_field_types/ids are specified.
+         Returns None if an error occurred and on_error='skip'.
+     """
+     want_companies = "companies" in expand_set
+     want_fields = bool(expand_field_types or expand_field_ids)
+
+     companies: list[dict[str, Any]] = []
+
+     try:
+         if want_companies:
+             # V1 fallback: fetch the person via the V1 API to get organization_ids
+             person_data = client._http.get(f"/persons/{person_id}", v1=True)
+             company_ids_raw = (
+                 person_data.get("organization_ids") or person_data.get("organizationIds") or []
+             )
+             company_ids = [int(cid) for cid in company_ids_raw if cid is not None]
+
+             # Apply the max_results limit
+             if max_results is not None and max_results >= 0:
+                 company_ids = company_ids[:max_results]
+
+             if want_fields:
+                 # Use the V2 API with field types to get field values
+                 for cid in company_ids:
+                     company = client.companies.get(
+                         CompanyId(cid),
+                         field_types=expand_field_types,
+                         field_ids=expand_field_ids,
+                     )
+                     companies.append(
+                         _company_to_expand_dict(
+                             company,
+                             expand_field_types,
+                             expand_field_ids,
+                             field_id_to_display,
+                             prefix_fields,
+                         )
+                     )
+             else:
+                 # Fetch company details via the V1 API (core fields only)
+                 for cid in company_ids:
+                     company_data = client._http.get(f"/organizations/{cid}", v1=True)
+                     companies.append(
+                         {
+                             "id": cid,
+                             "name": company_data.get("name"),
+                             "domain": company_data.get("domain"),
+                         }
+                     )
+
+     except Exception as e:
+         if on_error == "skip":
+             warnings.append(f"Skipped expansion for person {int(person_id)}: {e}")
+             return None
+         raise
+
+     # Return ([], companies) - people is always empty for person list expansion
+     return [], companies
+
+
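The V1 fallback only relies on the organization-ID key, which has appeared in both snake_case and camelCase; the extraction reduces to a couple of lines (the payload below is hypothetical):

    person_data = {"organizationIds": [10, None, 11]}  # hypothetical V1 payload
    company_ids_raw = (
        person_data.get("organization_ids") or person_data.get("organizationIds") or []
    )
    company_ids = [int(cid) for cid in company_ids_raw if cid is not None]
    assert company_ids == [10, 11]
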
+ def _fetch_associations(
+     client: Any,
+     list_type: ListType,
+     entity_id: int,
+     *,
+     expand_set: set[str],
+     max_results: int | None,
+     on_error: str,
+     warnings: list[str],
+     expand_field_types: list[FieldType] | None = None,
+     expand_field_ids: list[AnyFieldId] | None = None,
+     expand_filters: FilterExpression | None = None,
+     expand_opps_list_id: ListId | None = None,
+     field_id_to_display: dict[str, str] | None = None,
+     prefix_fields: bool = True,
+ ) -> tuple[list[dict[str, Any]], list[dict[str, Any]], list[dict[str, Any]]] | None:
+     """
+     Dispatch to the correct association fetcher based on list type.
+
+     Routes to:
+     - _fetch_opportunity_associations for opportunity lists
+     - _fetch_company_associations for company/organization lists
+     - _fetch_person_associations for person lists
+
+     Args:
+         field_id_to_display: Mapping from field ID to display name for --expand-fields
+         prefix_fields: If True, prefix field keys with the entity type (for flat CSV mode).
+
+     Returns:
+         Tuple of (people_list, companies_list, opportunities_list).
+         Returns None if an error occurred and on_error='skip'.
+     """
+     people: list[dict[str, Any]] = []
+     companies: list[dict[str, Any]] = []
+     opportunities: list[dict[str, Any]] = []
+
+     try:
+         if list_type == ListType.OPPORTUNITY:
+             result = _fetch_opportunity_associations(
+                 client=client,
+                 opportunity_id=OpportunityId(entity_id),
+                 expand_set=expand_set,
+                 max_results=max_results,
+                 on_error=on_error,
+                 warnings=warnings,
+                 expand_field_types=expand_field_types,
+                 expand_field_ids=expand_field_ids,
+                 field_id_to_display=field_id_to_display,
+                 prefix_fields=prefix_fields,
+             )
+             if result is None:
+                 return None
+             people, companies = result
+
+         elif list_type == ListType.COMPANY:
+             result = _fetch_company_associations(
+                 client=client,
+                 company_id=CompanyId(entity_id),
+                 expand_set=expand_set,
+                 max_results=max_results,
+                 on_error=on_error,
+                 warnings=warnings,
+                 expand_field_types=expand_field_types,
+                 expand_field_ids=expand_field_ids,
+                 field_id_to_display=field_id_to_display,
+                 prefix_fields=prefix_fields,
+             )
+             if result is None:
+                 return None
+             people, _ = result
+
+             # Fetch opportunities if requested (Phase 5)
+             if "opportunities" in expand_set:
+                 opportunities = _fetch_entity_opportunities(
+                     client=client,
+                     entity_type="company",
+                     entity_id=CompanyId(entity_id),
+                     opps_list_id=expand_opps_list_id,
+                     max_results=max_results,
+                     on_error=on_error,
+                     warnings=warnings,
+                 )
+
+         elif list_type == ListType.PERSON:
+             result = _fetch_person_associations(
+                 client=client,
+                 person_id=PersonId(entity_id),
+                 expand_set=expand_set,
+                 max_results=max_results,
+                 on_error=on_error,
+                 warnings=warnings,
+                 expand_field_types=expand_field_types,
+                 expand_field_ids=expand_field_ids,
+                 field_id_to_display=field_id_to_display,
+                 prefix_fields=prefix_fields,
+             )
+             if result is None:
+                 return None
+             _, companies = result
+
+             # Fetch opportunities if requested (Phase 5)
+             if "opportunities" in expand_set:
+                 opportunities = _fetch_entity_opportunities(
+                     client=client,
+                     entity_type="person",
+                     entity_id=PersonId(entity_id),
+                     opps_list_id=expand_opps_list_id,
+                     max_results=max_results,
+                     on_error=on_error,
+                     warnings=warnings,
+                 )
+
+         else:
+             raise ValueError(f"Unsupported list type for expansion: {list_type}")
+
+         # Apply expand filters (Phase 5)
+         if expand_filters:
+             people = [p for p in people if expand_filters.matches(p)]
+             companies = [c for c in companies if expand_filters.matches(c)]
+             opportunities = [o for o in opportunities if expand_filters.matches(o)]
+
+         return people, companies, opportunities
+
+     except Exception as e:
+         if on_error == "skip":
+             warnings.append(f"Skipped expansion for entity {entity_id}: {e}")
+             return None
+         raise
+
+
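Note that `expand_filters` is applied uniformly to all three result lists after fetching, so a filter keyed on one entity type simply matches nothing on the others. A minimal stand-in with a `matches` predicate (the real `FilterExpression` comes from affinity.filters; this duck-typed version is only for illustration):

    class _DomainFilter:
        def matches(self, item: dict) -> bool:
            return item.get("domain") == "acme.com"

    companies = [{"id": 1, "domain": "acme.com"}, {"id": 2, "domain": "other.io"}]
    flt = _DomainFilter()
    assert [c for c in companies if flt.matches(c)] == [{"id": 1, "domain": "acme.com"}]
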
+ def _fetch_entity_opportunities(
+     client: Any,
+     entity_type: str,
+     entity_id: PersonId | CompanyId,
+     *,
+     opps_list_id: ListId | None,
+     max_results: int | None,
+     on_error: str,
+     warnings: list[str],
+ ) -> list[dict[str, Any]]:
+     """
+     Fetch opportunities associated with a person or company.
+
+     If opps_list_id is provided, only that specific opportunity list is searched.
+     Otherwise, all accessible opportunity lists are searched.
+
+     Returns a list of opportunity dicts with id, name, listId.
+     """
+     opportunities: list[dict[str, Any]] = []
+
+     try:
+         # Get the opportunity lists to search
+         if opps_list_id is not None:
+             opp_list_ids = [opps_list_id]
+         else:
+             # Fetch all opportunity lists the user has access to
+             opp_list_ids = []
+             for page in client.lists.pages():
+                 for lst in page.data:
+                     if lst.type == ListType.OPPORTUNITY:
+                         opp_list_ids.append(ListId(int(lst.id)))
+
+         # Search each opportunity list for entries associated with this entity
+         for list_id in opp_list_ids:
+             entries = client.lists.entries(list_id)
+
+             # Fetch entries from this list and check associations.
+             # Note: this is expensive, as each entry's associations must be checked.
+             for page in entries.pages(limit=100):
+                 for entry in page.data:
+                     if entry.entity is None:
+                         continue
+
+                     opp_id = OpportunityId(int(entry.entity.id))
+
+                     # Check whether this opportunity is associated with our entity
+                     try:
+                         assoc = client.opportunities.get_associations(opp_id)
+                         is_associated = False
+
+                         if entity_type == "person":
+                             person_ids = [int(pid) for pid in assoc.person_ids]
+                             is_associated = int(entity_id) in person_ids
+                         elif entity_type == "company":
+                             company_ids = [int(cid) for cid in assoc.company_ids]
+                             is_associated = int(entity_id) in company_ids
+
+                         if is_associated:
+                             opportunities.append(
+                                 {
+                                     "id": int(opp_id),
+                                     "name": getattr(entry.entity, "name", None),
+                                     "listId": int(list_id),
+                                 }
+                             )
+
+                             # Apply the max-results limit
+                             if max_results is not None and len(opportunities) >= max_results:
+                                 return opportunities
+
+                     except Exception:
+                         # Skip opportunities we can't access
+                         continue
+
+                 # Stop pagination if we already have enough results
+                 if max_results is not None and len(opportunities) >= max_results:
+                     break
+
+     except Exception as e:
+         if on_error == "skip":
+             warnings.append(f"Error fetching opportunities for {entity_type} {int(entity_id)}: {e}")
+         else:
+             raise
+
+     return opportunities
+
+
+ def _validate_and_resolve_expand_fields(
+     client: Any,
+     expand_set: set[str],
+     field_specs: tuple[str, ...],
+ ) -> list[tuple[str, AnyFieldId]]:
+     """
+     Validate --expand-fields against the available global/enriched fields.
+
+     Fetches field metadata for the expanded entity types (person/company) and validates
+     that each field spec exists. Field specs can be:
+     - Field names (resolved to IDs via a metadata lookup)
+     - Field IDs (validated against the metadata)
+
+     Args:
+         client: Affinity client instance
+         expand_set: Set of expand types ("people", "companies")
+         field_specs: Tuple of field spec strings from --expand-fields
+
+     Returns:
+         List of (original_spec, resolved_field_id) tuples
+
+     Raises:
+         CLIError: If a field spec doesn't match any available field
+     """
+     # Build a combined field lookup from person and company metadata.
+     # name_to_field maps lowercase name -> (display_name, field_id) for name resolution;
+     # id_to_field maps field_id -> (display_name, field_id) for ID validation.
+     name_to_field: dict[str, tuple[str, AnyFieldId]] = {}
+     id_to_field: dict[str, tuple[str, AnyFieldId]] = {}
+     all_field_names: set[str] = set()
+
+     if "people" in expand_set:
+         person_fields = client.persons.get_fields()
+         for f in person_fields:
+             name_lower = f.name.lower()
+             name_to_field[name_lower] = (f.name, f.id)
+             id_to_field[str(f.id)] = (f.name, f.id)
+             all_field_names.add(f.name)
+
+     if "companies" in expand_set:
+         company_fields = client.companies.get_fields()
+         for f in company_fields:
+             name_lower = f.name.lower()
+             # Only add if not already present (person fields take precedence)
+             if name_lower not in name_to_field:
+                 name_to_field[name_lower] = (f.name, f.id)
+             if str(f.id) not in id_to_field:
+                 id_to_field[str(f.id)] = (f.name, f.id)
+             all_field_names.add(f.name)
+
+     # Resolve each field spec
+     parsed: list[tuple[str, AnyFieldId]] = []
+     for spec in field_specs:
+         spec = spec.strip()
+         if not spec:
+             continue
+
+         # Try to match by field ID first (exact match)
+         if spec in id_to_field:
+             display_name, field_id = id_to_field[spec]
+             parsed.append((display_name, field_id))
+             continue
+
+         # Try to parse as FieldId format (field-123)
+         try:
+             field_id = FieldId(spec)
+             if str(field_id) in id_to_field:
+                 display_name, _ = id_to_field[str(field_id)]
+                 parsed.append((display_name, field_id))
+                 continue
+             # Valid FieldId format but not found - fall through to the name lookup
+         except ValueError:
+             pass
+
+         # Try to match by name (case-insensitive)
+         spec_lower = spec.lower()
+         if spec_lower in name_to_field:
+             display_name, field_id = name_to_field[spec_lower]
+             parsed.append((display_name, field_id))
+             continue
+
+         # Not found - raise an error with a helpful message.
+         # Show a sample of available field names (up to 10).
+         sample_names = sorted(all_field_names)[:10]
+         hint_suffix = ", ..." if len(all_field_names) > 10 else ""
+         raise CLIError(
+             f"Unknown expand field: '{spec}'",
+             exit_code=2,
+             error_type="usage_error",
+             details={"availableFields": sorted(all_field_names)[:20]},
+             hint=f"Available fields include: {', '.join(sample_names)}{hint_suffix}",
+         )
+
+     return parsed
+
+
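Resolution precedence here is: exact ID match, then a parseable `field-N` ID, then a case-insensitive name, mirroring the lookups above. In miniature (dict values are hypothetical):

    id_to_field = {"field-7": ("Stage", "field-7")}
    name_to_field = {"stage": ("Stage", "field-7")}
    for spec in ("field-7", "STAGE"):
        hit = id_to_field.get(spec) or name_to_field.get(spec.lower())
        assert hit == ("Stage", "field-7")
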
+ def _expand_csv_headers(
+     base_headers: list[str],
+     expand_set: set[str],
+     csv_mode: str = "flat",
+     expand_fields: list[tuple[str, AnyFieldId]] | None = None,
+     header_mode: CsvHeaderMode = "names",
+ ) -> list[str]:
+     """
+     Add expansion columns to CSV headers.
+
+     Flat mode: expandedType, expandedId, expandedName, expandedEmail, expandedDomain,
+     plus prefixed field columns (person.{name/id}, company.{name/id}) for --expand-fields.
+     Nested mode: _expand_people, _expand_companies (JSON arrays).
+
+     Args:
+         expand_fields: List of (original_spec, field_id) tuples
+         header_mode: "names" uses the original spec, "ids" uses the field ID
+     """
+     headers = list(base_headers)
+     if csv_mode == "nested":
+         # Nested mode: add JSON array columns
+         if "people" in expand_set:
+             headers.append("_expand_people")
+         if "companies" in expand_set:
+             headers.append("_expand_companies")
+         if "opportunities" in expand_set:
+             headers.append("_expand_opportunities")
+     else:
+         # Flat mode: add row-per-association columns
+         headers.append("expandedType")
+         headers.append("expandedId")
+         headers.append("expandedName")
+         if "people" in expand_set:
+             headers.append("expandedEmail")
+         if "companies" in expand_set:
+             headers.append("expandedDomain")
+         if "opportunities" in expand_set:
+             headers.append("expandedListId")
+         # Add prefixed columns for --expand-fields (Phase 4)
+         if expand_fields:
+             for original_spec, field_id in expand_fields:
+                 # Use the original spec name for "names" mode, the field ID for "ids" mode
+                 display_name = original_spec if header_mode == "names" else str(field_id)
+                 if "people" in expand_set:
+                     headers.append(f"person.{display_name}")
+                 if "companies" in expand_set:
+                     headers.append(f"company.{display_name}")
+     return headers
+
+
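Since `_expand_csv_headers` is a pure function, its behavior is easy to pin down with a direct call (the field spec and ID below are hypothetical):

    _expand_csv_headers(["entityName"], {"people"}, csv_mode="flat",
                        expand_fields=[("Role", "field-9")])
    # -> ["entityName", "expandedType", "expandedId", "expandedName",
    #     "expandedEmail", "person.Role"]
    _expand_csv_headers(["entityName"], {"people"}, csv_mode="nested")
    # -> ["entityName", "_expand_people"]
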
+ @list_group.group(name="entry", cls=RichGroup)
+ def list_entry_group() -> None:
+     """List entry commands."""
+
+
+ @category("read")
+ @list_entry_group.command(name="get", cls=RichCommand)
+ @click.argument("list_selector", type=str)
+ @click.argument("entry_id", type=int)
+ @output_options
+ @click.pass_obj
+ def list_entry_get(
+     ctx: CLIContext,
+     list_selector: str,
+     entry_id: int,
+ ) -> None:
+     """
+     Get a single list entry by ID.
+
+     Displays the list entry with its field values and field names.
+
+     Examples:
+
+     - `xaffinity list entry get "Portfolio" 12345`
+     - `xaffinity list entry get 67890 12345`
+     """
+
+     def fn(ctx: CLIContext, warnings: list[str]) -> CommandOutput:
+         client = ctx.get_client(warnings=warnings)
+         cache = ctx.session_cache
+         resolved_list = resolve_list_selector(client=client, selector=list_selector, cache=cache)
+         entries = client.lists.entries(resolved_list.list.id)
+         entry = entries.get(ListEntryId(entry_id))
+         payload = serialize_model_for_cli(entry)
+
+         # Include raw fields if available
+         fields_raw = getattr(entry, "fields_raw", None)
+         if isinstance(fields_raw, list):
+             payload["fields"] = fields_raw
+
+         resolved = dict(resolved_list.resolved)
+
+         # Fetch field metadata if fields are present
+         entry_fields = payload.get("fields") if isinstance(payload, dict) else None
+         if isinstance(entry_fields, list) and entry_fields:
+             try:
+                 from ..field_utils import build_field_id_to_name_map
+
+                 field_metadata = client.lists.get_fields(resolved_list.list.id)
+                 resolved["fieldMetadata"] = build_field_id_to_name_map(field_metadata)
+             except Exception:
+                 # Field metadata is optional - continue without names if fetch fails
+                 pass
+
+         # Extract resolved list name for context
+         ctx_resolved: dict[str, str] | None = None
+         list_resolved = resolved_list.resolved.get("list", {})
+         if isinstance(list_resolved, dict):
+             list_name = list_resolved.get("entityName")
+             if list_name:
+                 ctx_resolved = {"listId": str(list_name)}
+
+         cmd_context = CommandContext(
+             name="list entry get",
+             inputs={"listId": int(resolved_list.list.id), "entryId": entry_id},
+             modifiers={},
+             resolved=ctx_resolved,
+         )
+
+         return CommandOutput(
+             data={"listEntry": payload},
+             context=cmd_context,
+             resolved=resolved,
+             api_called=True,
+         )
+
+     run_command(ctx, command="list entry get", fn=fn)
+
+
+ def _validate_entry_target(
+     person_id: int | None,
+     company_id: int | None,
+ ) -> None:
+     count = sum(1 for value in (person_id, company_id) if value is not None)
+     if count == 1:
+         return
+     raise CLIError(
+         "Provide exactly one of --person-id or --company-id.",
+         error_type="usage_error",
+         exit_code=2,
+     )
+
+
+ @category("write")
+ @list_entry_group.command(name="add", cls=RichCommand)
+ @click.argument("list_selector", type=str)
+ @click.option("--person-id", type=int, default=None, help="Person id to add.")
+ @click.option("--company-id", type=int, default=None, help="Company id to add.")
+ @click.option("--creator-id", type=int, default=None, help="Creator id override.")
+ @output_options
+ @click.pass_obj
+ def list_entry_add(
+     ctx: CLIContext,
+     list_selector: str,
+     *,
+     person_id: int | None,
+     company_id: int | None,
+     creator_id: int | None,
+ ) -> None:
+     """Add a person or company to a list.
+
+     Note: Opportunities cannot be added to lists this way. Use 'opportunity create --list-id'
+     instead, which creates both the opportunity and its list entry atomically.
+     """
+
+     def fn(ctx: CLIContext, warnings: list[str]) -> CommandOutput:
+         _validate_entry_target(person_id, company_id)
+         client = ctx.get_client(warnings=warnings)
+         cache = ctx.session_cache
+         resolved_list = resolve_list_selector(client=client, selector=list_selector, cache=cache)
+         entries = client.lists.entries(resolved_list.list.id)
+
+         if person_id is not None:
+             created = entries.add_person(PersonId(person_id), creator_id=creator_id)
+         else:
+             assert company_id is not None
+             created = entries.add_company(CompanyId(company_id), creator_id=creator_id)
+
+         # Build CommandContext for list entry add
+         ctx_modifiers: dict[str, object] = {}
+         if person_id is not None:
+             ctx_modifiers["personId"] = person_id
+         if company_id is not None:
+             ctx_modifiers["companyId"] = company_id
+         if creator_id is not None:
+             ctx_modifiers["creatorId"] = creator_id
+
+         # Extract resolved list name for context
+         ctx_resolved: dict[str, str] | None = None
+         list_resolved = resolved_list.resolved.get("list", {})
+         if isinstance(list_resolved, dict):
+             list_name = list_resolved.get("entityName")
+             if list_name:
+                 ctx_resolved = {"listId": str(list_name)}
+
+         cmd_context = CommandContext(
+             name="list entry add",
+             inputs={"listId": int(resolved_list.list.id)},
+             modifiers=ctx_modifiers,
+             resolved=ctx_resolved,
+         )
+
+         payload = serialize_model_for_cli(created)
+         return CommandOutput(
+             data={"listEntry": payload},
+             context=cmd_context,
+             resolved=resolved_list.resolved,
+             api_called=True,
+         )
+
+     run_command(ctx, command="list entry add", fn=fn)
+
+
+ @category("write")
+ @destructive
+ @list_entry_group.command(name="delete", cls=RichCommand)
+ @click.argument("list_selector", type=str)
+ @click.argument("entry_id", type=int)
+ @click.option("--yes", "-y", is_flag=True, help="Skip confirmation prompt.")
+ @output_options
+ @click.pass_obj
+ def list_entry_delete(ctx: CLIContext, list_selector: str, entry_id: int, yes: bool) -> None:
+     """Delete a list entry."""
+     if not yes:
+         click.confirm(f"Delete entry {entry_id} from list '{list_selector}'?", abort=True)
+
+     def fn(ctx: CLIContext, warnings: list[str]) -> CommandOutput:
+         client = ctx.get_client(warnings=warnings)
+         cache = ctx.session_cache
+         resolved_list = resolve_list_selector(client=client, selector=list_selector, cache=cache)
+         entries = client.lists.entries(resolved_list.list.id)
+         success = entries.delete(ListEntryId(entry_id))
+
+         # Extract resolved list name for context
+         ctx_resolved: dict[str, str] | None = None
+         list_resolved = resolved_list.resolved.get("list", {})
+         if isinstance(list_resolved, dict):
+             list_name = list_resolved.get("entityName")
+             if list_name:
+                 ctx_resolved = {"listId": str(list_name)}
+
+         cmd_context = CommandContext(
+             name="list entry delete",
+             inputs={"listId": int(resolved_list.list.id), "entryId": entry_id},
+             modifiers={},
+             resolved=ctx_resolved,
+         )
+
+         return CommandOutput(
+             data={"success": success},
+             context=cmd_context,
+             resolved=resolved_list.resolved,
+             api_called=True,
+         )
+
+     run_command(ctx, command="list entry delete", fn=fn)
+
+
+ @category("write")
+ @list_entry_group.command(name="field", cls=RichCommand)
+ @click.argument("list_selector", type=str)
+ @click.argument("entry_id", type=int)
+ @click.option(
+     "--set",
+     "set_values",
+     nargs=2,
+     multiple=True,
+     metavar="FIELD VALUE",
+     help="Set field value (repeatable). Replaces existing value(s).",
+ )
+ @click.option(
+     "--append",
+     "append_values",
+     nargs=2,
+     multiple=True,
+     metavar="FIELD VALUE",
+     help="Append to multi-value field (repeatable). Adds without replacing.",
+ )
+ @click.option(
+     "--unset",
+     "unset_fields",
+     multiple=True,
+     metavar="FIELD",
+     help="Unset all values for field (repeatable).",
+ )
+ @click.option(
+     "--unset-value",
+     "unset_values",
+     nargs=2,
+     multiple=True,
+     metavar="FIELD VALUE",
+     help="Unset specific value from multi-value field (repeatable).",
+ )
+ @click.option(
+     "--set-json",
+     "json_input",
+     type=str,
+     help="JSON object of field:value pairs to set.",
+ )
+ @click.option(
+     "--get",
+     "get_fields",
+     multiple=True,
+     metavar="FIELD",
+     help="Get specific field values (repeatable).",
+ )
+ @output_options
+ @click.pass_obj
+ def list_entry_field(
+     ctx: CLIContext,
+     list_selector: str,
+     entry_id: int,
+     *,
+     set_values: tuple[tuple[str, str], ...],
+     append_values: tuple[tuple[str, str], ...],
+     unset_fields: tuple[str, ...],
+     unset_values: tuple[tuple[str, str], ...],
+     json_input: str | None,
+     get_fields: tuple[str, ...],
+ ) -> None:
+     """
+     Manage list entry field values.
+
+     Unified command for getting, setting, appending, and unsetting field values.
+     Field names are resolved case-insensitively. Field IDs (field-123) can also be used.
+
+     Operation order: --set/--set-json first, then --append, then --unset/--unset-value.
+
+     Examples:
+
+     - `xaffinity entry field "Portfolio" 123 --set Status "Active"`
+     - `xaffinity entry field "Portfolio" 123 --set Status "Active" --set Priority "High"`
+     - `xaffinity entry field "Portfolio" 123 --append Tags "Priority"`
+     - `xaffinity entry field "Portfolio" 123 --unset Status`
+     - `xaffinity entry field "Portfolio" 123 --unset-value Tags "OldTag"`
+     - `xaffinity entry field "Portfolio" 123 --set-json '{"Status": "Active"}'`
+     - `xaffinity entry field "Portfolio" 123 --get Status --get Priority`
+     """
+
+     def fn(ctx: CLIContext, warnings: list[str]) -> CommandOutput:
+         from ..field_utils import (
+             FieldResolver,
+             find_field_values_for_field,
+             format_value_for_comparison,
+         )
+
+         # Validate: at least one operation must be specified
+         has_set = bool(set_values) or bool(json_input)
+         has_append = bool(append_values)
+         has_unset = bool(unset_fields)
+         has_unset_value = bool(unset_values)
+         has_get = bool(get_fields)
+
+         if not any([has_set, has_append, has_unset, has_unset_value, has_get]):
+             raise CLIError(
+                 "No operation specified. Use --set, --append, --unset, --unset-value, "
+                 "--set-json, or --get.",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+
+         # Validate: --get is exclusive (reads can't be mixed with writes)
+         if has_get and (has_set or has_append or has_unset or has_unset_value):
+             raise CLIError(
+                 "--get cannot be combined with write operations "
+                 "(--set, --append, --unset, --unset-value, --set-json).",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+
+         # Collect all fields from the operations for conflict detection
+         set_option_fields: set[str] = {fv[0] for fv in set_values}
+         set_json_fields: set[str] = set()
+         if json_input:
+             try:
+                 json_data = json.loads(json_input)
+                 if isinstance(json_data, dict):
+                     set_json_fields = set(json_data.keys())
+             except json.JSONDecodeError:
+                 pass  # Handled later
+
+         # Check for duplicate fields between --set and --set-json
+         duplicate_fields = set_option_fields & set_json_fields
+         if duplicate_fields:
+             raise CLIError(
+                 f"Field(s) in both --set and --set-json: {duplicate_fields}",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+
+         all_set_fields = set_option_fields | set_json_fields
+         all_append_fields: set[str] = {av[0] for av in append_values}
+         all_unset_fields: set[str] = set(unset_fields)
+         all_unset_value_fields: set[str] = {uv[0] for uv in unset_values}
+
+         # Check for conflicting operations on the same field
+         if all_set_fields & all_append_fields:
+             raise CLIError(
+                 f"Field(s) in both --set and --append: {all_set_fields & all_append_fields}",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+         if all_set_fields & all_unset_fields:
+             raise CLIError(
+                 f"Field(s) in both --set and --unset: {all_set_fields & all_unset_fields}",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+         if all_set_fields & all_unset_value_fields:
+             raise CLIError(
+                 f"Field(s) in both --set and --unset-value: "
+                 f"{all_set_fields & all_unset_value_fields}",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+         if all_append_fields & all_unset_fields:
+             raise CLIError(
+                 f"Field(s) in both --append and --unset: {all_append_fields & all_unset_fields}",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+         if all_unset_fields & all_unset_value_fields:
+             raise CLIError(
+                 f"Field(s) in both --unset and --unset-value: "
+                 f"{all_unset_fields & all_unset_value_fields}",
+                 exit_code=2,
+                 error_type="usage_error",
+             )
+         # Note: --append + --unset-value on the same field is ALLOWED (tag-swap pattern)
+
+         client = ctx.get_client(warnings=warnings)
+         cache = ctx.session_cache
+         resolved_list = resolve_list_selector(client=client, selector=list_selector, cache=cache)
+         resolved = dict(resolved_list.resolved)
+
+         # Fetch field metadata
+         field_metadata = list_fields_for_list(
+             client=client, list_id=resolved_list.list.id, cache=cache
+         )
+         resolver = FieldResolver(field_metadata)
+
+         # Pattern for field IDs: must be "field-" followed by digits.
+         # Note: pure numeric strings like "2024" are treated as field NAMES, not IDs.
+         import re
+
+         _field_id_pattern = re.compile(r"^field-\d+$")
+
+         # Helper to resolve a field name or ID and validate that it exists on the list
+         def resolve_field(field_spec: str) -> str:
+             """Resolve a field name/ID, auto-detecting the format.
+
+             Args:
+                 field_spec: Field name or ID to resolve.
+
+             Returns:
+                 The resolved field ID.
+
+             Raises:
+                 CLIError: If the field is not found on the list.
+             """
+             # Check for FieldId format (pattern: field-\d+).
+             # Note: pure numeric strings like "2024" are treated as field names.
+             if _field_id_pattern.match(field_spec):
+                 # Validate that the field ID exists on this list
+                 if not resolver.get_field_name(field_spec):
+                     raise CLIError(
+                         f"Field '{field_spec}' not found on list '{list_selector}'.",
+                         exit_code=2,
+                         error_type="not_found",
+                     )
+                 return field_spec  # It's a valid field ID
+             # Check for an EnrichedFieldId prefix (affinity-data-* or other known prefixes).
+             # Note: enriched fields may not appear in list field metadata, so skip validation.
+             if field_spec.startswith("affinity-data-") or field_spec.startswith(
+                 "source-of-introduction"
+             ):
+                 return field_spec  # It's a valid enriched field ID
+             # Resolve as a field name (this already raises if not found)
+             return resolver.resolve_field_name_or_id(field_spec, context="field")
+
+         # Build modifiers for the CommandContext
+         ctx_modifiers: dict[str, object] = {}
+         if set_values:
+             ctx_modifiers["set"] = [list(sv) for sv in set_values]
+         if append_values:
+             ctx_modifiers["append"] = [list(av) for av in append_values]
+         if unset_fields:
+             ctx_modifiers["unset"] = list(unset_fields)
+         if unset_values:
+             ctx_modifiers["unsetValue"] = [list(uv) for uv in unset_values]
+         if json_input:
+             ctx_modifiers["json"] = json_input
+         if get_fields:
+             ctx_modifiers["get"] = list(get_fields)
+
+         results: dict[str, Any] = {}
+
+         # Upfront field resolution: resolve ALL fields before any API calls (fail fast).
+         # This catches typos and invalid field names before any side effects.
+         resolved_fields: dict[str, str] = {}  # field_spec -> resolved_field_id
+
+         all_field_specs: list[str] = []
+         all_field_specs.extend(fv[0] for fv in set_values)
+         all_field_specs.extend(av[0] for av in append_values)
+         all_field_specs.extend(unset_fields)
+         all_field_specs.extend(uv[0] for uv in unset_values)
+         all_field_specs.extend(get_fields)
+         if json_input:
+             try:
+                 json_data_for_fields = json.loads(json_input)
+                 if isinstance(json_data_for_fields, dict):
+                     all_field_specs.extend(json_data_for_fields.keys())
+             except json.JSONDecodeError:
+                 pass  # Will be caught later with a better error message
+
+         for field_spec in all_field_specs:
+             if field_spec not in resolved_fields:
+                 resolved_fields[field_spec] = resolve_field(field_spec)
+
+         # Handle --get: read field values
+         if has_get:
+             existing_values = client.field_values.list(list_entry_id=ListEntryId(entry_id))
+             field_results: dict[str, Any] = {}
+
+             for field_spec in get_fields:
+                 target_field_id = resolved_fields[field_spec]  # Already resolved upfront
+                 field_values = find_field_values_for_field(
+                     field_values=[serialize_model_for_cli(v) for v in existing_values],
+                     field_id=target_field_id,
+                 )
+                 resolved_name = resolver.get_field_name(target_field_id) or field_spec
+                 if field_values:
+                     if len(field_values) == 1:
+                         field_results[resolved_name] = field_values[0].get("value")
+                     else:
+                         field_results[resolved_name] = [fv.get("value") for fv in field_values]
+                 else:
+                     field_results[resolved_name] = None
+
+             results["fields"] = field_results
+
+             cmd_context = CommandContext(
+                 name="entry field",
+                 inputs={"listSelector": list_selector, "entryId": entry_id},
+                 modifiers=ctx_modifiers,
+                 resolved={k: str(v) for k, v in resolved.items() if v is not None}
+                 if resolved
+                 else None,
+             )
+
+             return CommandOutput(
+                 data=results,
+                 context=cmd_context,
+                 resolved=resolved,
+                 api_called=True,
+             )
+
+         # Fetch existing values once for all write operations
+         existing_values_list = list(client.field_values.list(list_entry_id=ListEntryId(entry_id)))
+         existing_values_serialized = [serialize_model_for_cli(v) for v in existing_values_list]
+
+         created_values: list[dict[str, Any]] = []
+         deleted_count = 0
+
+         # Get the entries API for this list
+         entries = client.lists.entries(resolved_list.list.id)
+
+         # Phase 1: Handle --set and --set-json (replace semantics)
+         set_operations: list[tuple[str, Any]] = []
+
+         # Collect from --set options
+         for field_spec, value in set_values:
+             set_operations.append((field_spec, value))
+
+         # Collect from --set-json
+         if json_input:
+             try:
+                 json_data = json.loads(json_input)
+                 if not isinstance(json_data, dict):
+                     raise CLIError(
+                         "--set-json must be a JSON object.",
+                         exit_code=2,
+                         error_type="usage_error",
+                     )
+                 for field_spec, value in json_data.items():
+                     set_operations.append((field_spec, value))
+             except json.JSONDecodeError as e:
+                 raise CLIError(
+                     f"Invalid JSON in --set-json: {e}",
+                     exit_code=2,
+                     error_type="usage_error",
+                 ) from e
+
+         # Execute set operations (delete existing values, then create the new one)
+         for field_spec, value in set_operations:
+             target_field_id = resolved_fields[field_spec]  # Already resolved upfront
+             resolved_name = resolver.get_field_name(target_field_id) or field_spec
+
+             # Delete existing values for this field (replace semantics)
+             existing_for_field = find_field_values_for_field(
+                 field_values=existing_values_serialized,
+                 field_id=target_field_id,
+             )
+             if len(existing_for_field) > 1:
+                 # Emit a warning for a multi-value replace
+                 old_vals = [fv.get("value") for fv in existing_for_field]
+                 if len(old_vals) > 5:
+                     display_vals = [*old_vals[:3], f"...{len(old_vals) - 3} more..."]
+                 else:
+                     display_vals = old_vals
+                 click.echo(
+                     f"Warning: Replaced {len(existing_for_field)} existing values "
+                     f"for field '{resolved_name}': {display_vals}",
+                     err=True,
+                 )
+
+             for fv in existing_for_field:
+                 fv_id = fv.get("id")
+                 if fv_id:
+                     client.field_values.delete(fv_id)
+                     deleted_count += 1
+
+             # Create the new value using the V2 API
+             try:
+                 parsed_field_id: AnyFieldId = FieldId(target_field_id)
+             except ValueError:
+                 parsed_field_id = EnrichedFieldId(target_field_id)
+
+             result = entries.update_field_value(ListEntryId(entry_id), parsed_field_id, value)
+             created_values.append(serialize_model_for_cli(result))
+
+         # Phase 2: Handle --append (add without replacing)
+         for field_spec, value in append_values:
+             target_field_id = resolved_fields[field_spec]  # Already resolved upfront
+
+             # Just create the new value (no delete = append)
+             try:
+                 parsed_field_id = FieldId(target_field_id)
+             except ValueError:
+                 parsed_field_id = EnrichedFieldId(target_field_id)
+
+             result = entries.update_field_value(ListEntryId(entry_id), parsed_field_id, value)
+             created_values.append(serialize_model_for_cli(result))
+
+         # Refresh existing values for the unset operations (in case set/append modified them)
+         if has_unset or has_unset_value:
+             existing_values_list = list(
+                 client.field_values.list(list_entry_id=ListEntryId(entry_id))
+             )
+             existing_values_serialized = [serialize_model_for_cli(v) for v in existing_values_list]
+
+         # Phase 3a: Handle --unset (delete all values for the field)
+         for field_spec in unset_fields:
+             target_field_id = resolved_fields[field_spec]  # Already resolved upfront
+             existing_for_field = find_field_values_for_field(
+                 field_values=existing_values_serialized,
+                 field_id=target_field_id,
+             )
+             for fv in existing_for_field:
+                 fv_id = fv.get("id")
+                 if fv_id:
+                     client.field_values.delete(fv_id)
+                     deleted_count += 1
+
+         # Phase 3b: Handle --unset-value (delete a specific value)
+         for field_spec, value_to_remove in unset_values:
+             target_field_id = resolved_fields[field_spec]  # Already resolved upfront
+             existing_for_field = find_field_values_for_field(
+                 field_values=existing_values_serialized,
+                 field_id=target_field_id,
+             )
+             value_str = value_to_remove.strip()
+             found = False
+             for fv in existing_for_field:
+                 fv_value = fv.get("value")
+                 if format_value_for_comparison(fv_value) == value_str:
+                     fv_id = fv.get("id")
+                     if fv_id:
+                         client.field_values.delete(fv_id)
+                         deleted_count += 1
+                     found = True
+                     break
+             # Idempotent: warn (but don't fail) if the value was not found
+             if not found:
+                 resolved_name = resolver.get_field_name(target_field_id) or field_spec
+                 warnings.append(
+                     f"Value '{value_to_remove}' not found for field '{resolved_name}' "
+                     "(already removed or never existed)."
+                 )
+
+         # Build the result
+         if created_values:
+             results["created"] = created_values
+         if deleted_count > 0:
+             results["deleted"] = deleted_count
+
+         cmd_context = CommandContext(
+             name="entry field",
+             inputs={"listSelector": list_selector, "entryId": entry_id},
+             modifiers=ctx_modifiers,
+             resolved={k: str(v) for k, v in resolved.items() if v is not None}
+             if resolved
+             else None,
+         )
+
+         return CommandOutput(
+             data=results,
+             context=cmd_context,
+             resolved=resolved,
+             api_called=True,
+         )
+
+ run_command(ctx, command="entry field", fn=fn)
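
One pattern the conflict rules deliberately leave open is the tag swap: combining --append and --unset-value on the same field in a single invocation. For example:

    # Replace one tag with another in a single command
    xaffinity entry field "Portfolio" 123 --append Tags "Q3-Active" --unset-value Tags "Q2-Active"

Per the operation order documented in the command's docstring, the append runs before the unset, so the entry never ends up holding neither tag.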