corp-extractor 0.5.0__py3-none-any.whl → 0.9.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {corp_extractor-0.5.0.dist-info → corp_extractor-0.9.3.dist-info}/METADATA +228 -30
  2. corp_extractor-0.9.3.dist-info/RECORD +79 -0
  3. statement_extractor/__init__.py +1 -1
  4. statement_extractor/cli.py +2030 -24
  5. statement_extractor/data/statement_taxonomy.json +6949 -1159
  6. statement_extractor/database/__init__.py +52 -0
  7. statement_extractor/database/embeddings.py +186 -0
  8. statement_extractor/database/hub.py +428 -0
  9. statement_extractor/database/importers/__init__.py +32 -0
  10. statement_extractor/database/importers/companies_house.py +559 -0
  11. statement_extractor/database/importers/companies_house_officers.py +431 -0
  12. statement_extractor/database/importers/gleif.py +561 -0
  13. statement_extractor/database/importers/sec_edgar.py +392 -0
  14. statement_extractor/database/importers/sec_form4.py +512 -0
  15. statement_extractor/database/importers/wikidata.py +1120 -0
  16. statement_extractor/database/importers/wikidata_dump.py +1951 -0
  17. statement_extractor/database/importers/wikidata_people.py +1130 -0
  18. statement_extractor/database/models.py +254 -0
  19. statement_extractor/database/resolver.py +245 -0
  20. statement_extractor/database/store.py +3034 -0
  21. statement_extractor/document/__init__.py +62 -0
  22. statement_extractor/document/chunker.py +410 -0
  23. statement_extractor/document/context.py +171 -0
  24. statement_extractor/document/deduplicator.py +171 -0
  25. statement_extractor/document/html_extractor.py +246 -0
  26. statement_extractor/document/loader.py +303 -0
  27. statement_extractor/document/pipeline.py +388 -0
  28. statement_extractor/document/summarizer.py +195 -0
  29. statement_extractor/extractor.py +1 -1
  30. statement_extractor/models/__init__.py +19 -3
  31. statement_extractor/models/canonical.py +44 -1
  32. statement_extractor/models/document.py +308 -0
  33. statement_extractor/models/labels.py +47 -18
  34. statement_extractor/models/qualifiers.py +51 -3
  35. statement_extractor/models/statement.py +39 -15
  36. statement_extractor/models.py +1 -1
  37. statement_extractor/pipeline/config.py +6 -11
  38. statement_extractor/pipeline/context.py +5 -5
  39. statement_extractor/pipeline/orchestrator.py +90 -121
  40. statement_extractor/pipeline/registry.py +52 -46
  41. statement_extractor/plugins/__init__.py +20 -8
  42. statement_extractor/plugins/base.py +348 -78
  43. statement_extractor/plugins/extractors/gliner2.py +38 -28
  44. statement_extractor/plugins/labelers/taxonomy.py +18 -5
  45. statement_extractor/plugins/labelers/taxonomy_embedding.py +17 -6
  46. statement_extractor/plugins/pdf/__init__.py +10 -0
  47. statement_extractor/plugins/pdf/pypdf.py +291 -0
  48. statement_extractor/plugins/qualifiers/__init__.py +11 -0
  49. statement_extractor/plugins/qualifiers/companies_house.py +14 -3
  50. statement_extractor/plugins/qualifiers/embedding_company.py +422 -0
  51. statement_extractor/plugins/qualifiers/gleif.py +14 -3
  52. statement_extractor/plugins/qualifiers/person.py +588 -14
  53. statement_extractor/plugins/qualifiers/sec_edgar.py +14 -3
  54. statement_extractor/plugins/scrapers/__init__.py +10 -0
  55. statement_extractor/plugins/scrapers/http.py +236 -0
  56. statement_extractor/plugins/splitters/t5_gemma.py +176 -75
  57. statement_extractor/plugins/taxonomy/embedding.py +193 -46
  58. statement_extractor/plugins/taxonomy/mnli.py +16 -4
  59. statement_extractor/scoring.py +8 -8
  60. corp_extractor-0.5.0.dist-info/RECORD +0 -55
  61. statement_extractor/plugins/canonicalizers/__init__.py +0 -17
  62. statement_extractor/plugins/canonicalizers/base.py +0 -9
  63. statement_extractor/plugins/canonicalizers/location.py +0 -219
  64. statement_extractor/plugins/canonicalizers/organization.py +0 -230
  65. statement_extractor/plugins/canonicalizers/person.py +0 -242
  66. {corp_extractor-0.5.0.dist-info → corp_extractor-0.9.3.dist-info}/WHEEL +0 -0
  67. {corp_extractor-0.5.0.dist-info → corp_extractor-0.9.3.dist-info}/entry_points.txt +0 -0
statement_extractor/database/store.py (new file)
@@ -0,0 +1,3034 @@
+ """
+ Entity/Organization database with sqlite-vec for vector search.
+
+ Uses a hybrid approach:
+ 1. Text-based filtering (SQL LIKE) to narrow candidates
+ 2. sqlite-vec vector search for semantic ranking
+ """
+
+ import json
+ import logging
+ import re
+ import sqlite3
+ import time
+ from pathlib import Path
+ from typing import Iterator, Optional
+
+ import numpy as np
+ import pycountry
+ import sqlite_vec
+
+ from .models import CompanyRecord, DatabaseStats, EntityType, PersonRecord, PersonType
+
+ logger = logging.getLogger(__name__)
+
+ # Default database location
+ DEFAULT_DB_PATH = Path.home() / ".cache" / "corp-extractor" / "entities.db"
+
+ # Module-level shared connections by path (both databases share the same connection)
+ _shared_connections: dict[str, sqlite3.Connection] = {}
+
+ # Module-level singleton for OrganizationDatabase to prevent multiple loads
+ _database_instances: dict[str, "OrganizationDatabase"] = {}
+
+ # Module-level singleton for PersonDatabase
+ _person_database_instances: dict[str, "PersonDatabase"] = {}
+
+
+ def _get_shared_connection(db_path: Path, embedding_dim: int = 768) -> sqlite3.Connection:
+     """Get or create a shared database connection for the given path."""
+     path_key = str(db_path)
+     if path_key not in _shared_connections:
+         # Ensure directory exists
+         db_path.parent.mkdir(parents=True, exist_ok=True)
+
+         conn = sqlite3.connect(str(db_path))
+         conn.row_factory = sqlite3.Row
+
+         # Load sqlite-vec extension
+         conn.enable_load_extension(True)
+         sqlite_vec.load(conn)
+         conn.enable_load_extension(False)
+
+         _shared_connections[path_key] = conn
+         logger.debug(f"Created shared database connection for {path_key}")
+
+     return _shared_connections[path_key]
+
+
+ def close_shared_connection(db_path: Optional[Path] = None) -> None:
+     """Close a shared database connection."""
+     path_key = str(db_path or DEFAULT_DB_PATH)
+     if path_key in _shared_connections:
+         _shared_connections[path_key].close()
+         del _shared_connections[path_key]
+         logger.debug(f"Closed shared database connection for {path_key}")
+
+
+ # Comprehensive set of corporate legal suffixes (international)
+ COMPANY_SUFFIXES: set[str] = {
+     'A/S', 'AB', 'AG', 'AO', 'AG & Co', 'AG &', 'AG & CO.', 'AG & CO. KG', 'AG & CO. KGaA',
+     'AG & KG', 'AG & KGaA', 'AG & PARTNER', 'ATE', 'ASA', 'B.V.', 'BV', 'Class A', 'Class B',
+     'Class C', 'Class D', 'Class E', 'Class F', 'Class G', 'CO', 'Co', 'Co.', 'Company',
+     'Corp', 'Corp.', 'Corporation', 'DAC', 'GmbH', 'Inc', 'Inc.', 'Incorporated', 'KGaA',
+     'Limited', 'LLC', 'LLP', 'LP', 'Ltd', 'Ltd.', 'N.V.', 'NV', 'Plc', 'PC', 'plc', 'PLC',
+     'Pty Ltd', 'Pty', 'Pty. Ltd.', 'S.A.', 'S.A.B. de C.V.', 'SAB de CV', 'S.A.B.', 'S.A.P.I.',
+     'NV/SA', 'SDI', 'SpA', 'S.L.', 'S.p.A.', 'SA', 'SE', 'Tbk PT', 'U.A.',
+     # Additional common suffixes
+     'Group', 'Holdings', 'Holding', 'Partners', 'Trust', 'Fund', 'Bank', 'N.A.', 'The',
+ }
+
+ # Source priority for organization canonicalization (lower = higher priority)
+ SOURCE_PRIORITY: dict[str, int] = {
+     "gleif": 1,            # Gold standard LEI - globally unique legal entity identifier
+     "sec_edgar": 2,        # Vetted US filers with CIK + ticker
+     "companies_house": 3,  # Official UK registry
+     "wikipedia": 4,        # Crowdsourced, less authoritative
+ }
+
+ # Source priority for people canonicalization (lower = higher priority)
+ PERSON_SOURCE_PRIORITY: dict[str, int] = {
+     "wikidata": 1,         # Curated, has rich biographical data and Q codes
+     "sec_edgar": 2,        # Vetted US filers (Form 4 officers/directors)
+     "companies_house": 3,  # UK company officers
+ }
+
+ # Suffix expansions for canonical name matching
+ SUFFIX_EXPANSIONS: dict[str, str] = {
+     " ltd": " limited",
+     " corp": " corporation",
+     " inc": " incorporated",
+     " co": " company",
+     " intl": " international",
+     " natl": " national",
+ }
+
+
+ class UnionFind:
+     """Simple Union-Find (Disjoint Set Union) data structure for canonicalization."""
+
+     def __init__(self, elements: list[int]):
+         """Initialize with list of element IDs."""
+         self.parent: dict[int, int] = {e: e for e in elements}
+         self.rank: dict[int, int] = {e: 0 for e in elements}
+
+     def find(self, x: int) -> int:
+         """Find with path compression."""
+         if self.parent[x] != x:
+             self.parent[x] = self.find(self.parent[x])
+         return self.parent[x]
+
+     def union(self, x: int, y: int) -> None:
+         """Union by rank."""
+         px, py = self.find(x), self.find(y)
+         if px == py:
+             return
+         if self.rank[px] < self.rank[py]:
+             px, py = py, px
+         self.parent[py] = px
+         if self.rank[px] == self.rank[py]:
+             self.rank[px] += 1
+
+     def groups(self) -> dict[int, list[int]]:
+         """Return dict of root -> list of members."""
+         result: dict[int, list[int]] = {}
+         for e in self.parent:
+             root = self.find(e)
+             result.setdefault(root, []).append(e)
+         return result
+
+
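# A minimal sketch of how the UnionFind above groups equivalent record IDs
# during canonicalization (illustrative; the IDs and merge reasons are made up):
uf = UnionFind([1, 2, 3, 4])
uf.union(1, 2)      # e.g. two records share an LEI
uf.union(2, 3)      # e.g. same normalized name + region
print(uf.groups())  # {1: [1, 2, 3], 4: [4]}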
+ # Common region aliases not handled well by pycountry fuzzy search
+ REGION_ALIASES: dict[str, str] = {
+     "uk": "GB",
+     "u.k.": "GB",
+     "england": "GB",
+     "scotland": "GB",
+     "wales": "GB",
+     "northern ireland": "GB",
+     "usa": "US",
+     "u.s.a.": "US",
+     "u.s.": "US",
+     "united states of america": "US",
+     "america": "US",
+ }
+
+ # Cache for region normalization lookups
+ _region_cache: dict[str, str] = {}
+
+
+ def _normalize_region(region: str) -> str:
+     """
+     Normalize a region string to ISO 3166-1 alpha-2 country code.
+
+     Handles:
+     - Country codes (2-letter, 3-letter)
+     - Country names (with fuzzy matching)
+     - US state codes (CA, NY) -> US
+     - US state names (California, New York) -> US
+     - Common aliases (UK, USA, England) -> proper codes
+
+     Returns empty string if region cannot be normalized.
+     """
+     if not region:
+         return ""
+
+     # Check cache first
+     cache_key = region.lower().strip()
+     if cache_key in _region_cache:
+         return _region_cache[cache_key]
+
+     result = _normalize_region_uncached(region)
+     _region_cache[cache_key] = result
+     return result
+
+
+ def _normalize_region_uncached(region: str) -> str:
+     """Uncached region normalization logic."""
+     region_clean = region.strip()
+
+     # Empty after stripping = empty result
+     if not region_clean:
+         return ""
+
+     region_lower = region_clean.lower()
+     region_upper = region_clean.upper()
+
+     # Check common aliases first
+     if region_lower in REGION_ALIASES:
+         return REGION_ALIASES[region_lower]
+
+     # For 2-letter codes, check country first, then US state
+     # This means ambiguous codes like "CA" (Canada vs California) prefer country
+     # But unambiguous codes like "NY" (not a country) will match as US state
+     if len(region_clean) == 2:
+         # Try as country alpha-2 first
+         country = pycountry.countries.get(alpha_2=region_upper)
+         if country:
+             return country.alpha_2
+
+         # If not a country, try as US state code
+         subdivision = pycountry.subdivisions.get(code=f"US-{region_upper}")
+         if subdivision:
+             return "US"
+
+     # Try alpha-3 lookup
+     if len(region_clean) == 3:
+         country = pycountry.countries.get(alpha_3=region_upper)
+         if country:
+             return country.alpha_2
+
+     # Try as US state name (e.g., "California", "New York")
+     try:
+         subdivisions = list(pycountry.subdivisions.search_fuzzy(region_clean))
+         if subdivisions:
+             # Check if it's a US state
+             if subdivisions[0].code.startswith("US-"):
+                 return "US"
+             # Return the parent country code
+             return subdivisions[0].country_code
+     except LookupError:
+         pass
+
+     # Try country fuzzy search
+     try:
+         countries = pycountry.countries.search_fuzzy(region_clean)
+         if countries:
+             return countries[0].alpha_2
+     except LookupError:
+         pass
+
+     # Return empty if we can't normalize
+     return ""
+
+
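# Illustrative calls to _normalize_region, with the results the docstring's
# rules imply (alias, US state name, alpha-3 code, and an unmatched fallback):
for raw in ("UK", "California", "DEU", "Narnia"):
    print(raw, "->", _normalize_region(raw))
# Expected per the rules above: UK -> GB (alias), California -> US (state),
# DEU -> DE (alpha-3), Narnia -> "" (cannot be normalized)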
+ def _regions_match(region1: str, region2: str) -> bool:
+     """
+     Check if two regions match after normalization.
+
+     Empty regions match anything (lenient matching for incomplete data).
+     """
+     norm1 = _normalize_region(region1)
+     norm2 = _normalize_region(region2)
+
+     # Empty regions match anything
+     if not norm1 or not norm2:
+         return True
+
+     return norm1 == norm2
+
+
+ def _normalize_for_canon(name: str) -> str:
+     """Normalize name for canonical matching (simpler than search normalization)."""
+     # Lowercase
+     result = name.lower()
+     # Remove trailing dots
+     result = result.rstrip(".")
+     # Remove extra whitespace
+     result = " ".join(result.split())
+     return result
+
+
+ def _expand_suffix(name: str) -> str:
+     """Expand known suffix abbreviations."""
+     result = name.lower().rstrip(".")
+     for abbrev, full in SUFFIX_EXPANSIONS.items():
+         if result.endswith(abbrev):
+             result = result[:-len(abbrev)] + full
+             break  # Only expand one suffix
+     return result
+
+
+ def _names_match_for_canon(name1: str, name2: str) -> bool:
+     """Check if two names match for canonicalization."""
+     n1 = _normalize_for_canon(name1)
+     n2 = _normalize_for_canon(name2)
+
+     # Exact match after normalization
+     if n1 == n2:
+         return True
+
+     # Try with suffix expansion
+     if _expand_suffix(n1) == _expand_suffix(n2):
+         return True
+
+     return False
+
+
+ # Pre-compile the suffix pattern for performance
+ _SUFFIX_PATTERN = re.compile(
+     r'\s+(' + '|'.join(re.escape(suffix) for suffix in COMPANY_SUFFIXES) + r')\.?$',
+     re.IGNORECASE
+ )
+
+
+ def _clean_org_name(name: str | None) -> str:
+     """
+     Remove special characters and formatting from organization name.
+
+     Removes brackets, parentheses, quotes, and other formatting artifacts.
+     """
+     if not name:
+         return ""
+     # Replace the listed special characters with spaces (periods and commas survive)
+     cleaned = re.sub(r'[•;:\'"\[\](){}<>`~!@#$%^&*\-_=+\\|/?]+', ' ', name)
+     cleaned = re.sub(r'\s+', ' ', cleaned).strip()
+     # Recurse if changes were made (handles nested special chars)
+     return _clean_org_name(cleaned) if cleaned != name else cleaned
+
+
+ def _remove_suffix(name: str) -> str:
+     """
+     Remove corporate legal suffixes from company name.
+
+     Iteratively removes suffixes until no more are found.
+     Also removes possessive 's and trailing punctuation.
+     """
+     cleaned = name.strip()
+     cleaned = re.sub(r'\s+', ' ', cleaned)
+     # Remove possessive 's (e.g., "Amazon's" -> "Amazon")
+     cleaned = re.sub(r"'s\b", "", cleaned)
+     cleaned = re.sub(r'\s+', ' ', cleaned).strip()
+
+     while True:
+         new_name = _SUFFIX_PATTERN.sub('', cleaned)
+         # Remove trailing punctuation
+         new_name = re.sub(r'[ .,;&\n\t/)]$', '', new_name)
+
+         if new_name == cleaned:
+             break
+         cleaned = new_name.strip()
+
+     return cleaned.strip()
+
+
+ def _normalize_name(name: str) -> str:
+     """
+     Normalize company name for text matching.
+
+     1. Remove possessive 's (before cleaning removes the apostrophe)
+     2. Clean special characters
+     3. Remove legal suffixes
+     4. Lowercase
+     5. If result is empty, use cleaned lowercase original
+
+     Always returns a non-empty string for valid input.
+     """
+     if not name:
+         return ""
+     # Remove possessive 's first (before cleaning removes the apostrophe)
+     normalized = re.sub(r"'s\b", "", name)
+     # Clean special characters
+     cleaned = _clean_org_name(normalized)
+     # Remove legal suffixes
+     normalized = _remove_suffix(cleaned)
+     # Lowercase for matching
+     normalized = normalized.lower()
+     # If normalized is empty (e.g., name was just "Ltd"), use the cleaned name
+     if not normalized:
+         normalized = cleaned.lower() if cleaned else name.lower()
+     return normalized
+
+
+ def _extract_search_terms(query: str) -> list[str]:
+     """
+     Extract search terms from a query for SQL LIKE matching.
+
+     Returns list of terms to search for, ordered by length (longest first).
+     """
+     # Split into words
+     words = query.split()
+
+     # Filter out very short words (< 3 chars) unless it's the only word
+     if len(words) > 1:
+         words = [w for w in words if len(w) >= 3]
+
+     # Sort by length descending (longer words are more specific)
+     words.sort(key=len, reverse=True)
+
+     return words[:3]  # Limit to top 3 terms
+
+
+ # Person name normalization patterns
+ _PERSON_PREFIXES = {
+     "dr.", "dr", "prof.", "prof", "professor",
+     "mr.", "mr", "mrs.", "mrs", "ms.", "ms", "miss",
+     "sir", "dame", "lord", "lady",
+     "rev.", "rev", "reverend",
+     "hon.", "hon", "honorable",
+     "gen.", "gen", "general",
+     "col.", "col", "colonel",
+     "capt.", "capt", "captain",
+     "lt.", "lt", "lieutenant",
+     "sgt.", "sgt", "sergeant",
+ }
+
+ _PERSON_SUFFIXES = {
+     "jr.", "jr", "junior",
+     "sr.", "sr", "senior",
+     "ii", "iii", "iv", "v",
+     "2nd", "3rd", "4th", "5th",
+     "phd", "ph.d.", "ph.d",
+     "md", "m.d.", "m.d",
+     "esq", "esq.",
+     "mba", "m.b.a.",
+     "cpa", "c.p.a.",
+     "jd", "j.d.",
+ }
+
+
+ def _normalize_person_name(name: str) -> str:
+     """
+     Normalize person name for text matching.
+
+     1. Remove honorific prefixes (Dr., Prof., Mr., etc.)
+     2. Remove generational suffixes (Jr., Sr., III, PhD, etc.)
+     3. Keep name particles (von, van, de, al-, etc.)
+     4. Lowercase and strip
+
+     Always returns a non-empty string for valid input.
+     """
+     if not name:
+         return ""
+
+     # Lowercase for matching
+     normalized = name.lower().strip()
+
+     # Split into words
+     words = normalized.split()
+     if not words:
+         return ""
+
+     # Remove prefix if first word is a title
+     while words and words[0].rstrip(".") in _PERSON_PREFIXES:
+         words.pop(0)
+     if not words:
+         return name.lower().strip()  # Fallback if name was just a title
+
+     # Remove suffix if last word is a suffix
+     while words and words[-1].rstrip(".") in _PERSON_SUFFIXES:
+         words.pop()
+     if not words:
+         return name.lower().strip()  # Fallback if name was just suffixes
+
+     # Rejoin remaining words
+     normalized = " ".join(words)
+
+     # Clean up extra spaces
+     normalized = re.sub(r'\s+', ' ', normalized).strip()
+
+     return normalized if normalized else name.lower().strip()
+
+
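# Illustrative behavior of the two normalizers above, per their docstrings:
print(_normalize_name("[Apple] Inc."))               # "apple"  (brackets + suffix stripped)
print(_normalize_name("Amazon's"))                   # "amazon" (possessive removed)
print(_normalize_person_name("Dr. Jane Smith Jr."))  # "jane smith" (title + suffix removed)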
+ def get_database(db_path: Optional[str | Path] = None, embedding_dim: int = 768) -> "OrganizationDatabase":
+     """
+     Get a singleton OrganizationDatabase instance for the given path.
+
+     Args:
+         db_path: Path to database file
+         embedding_dim: Dimension of embeddings
+
+     Returns:
+         Shared OrganizationDatabase instance
+     """
+     path_key = str(db_path or DEFAULT_DB_PATH)
+     if path_key not in _database_instances:
+         logger.debug(f"Creating new OrganizationDatabase instance for {path_key}")
+         _database_instances[path_key] = OrganizationDatabase(db_path=db_path, embedding_dim=embedding_dim)
+     return _database_instances[path_key]
+
+
+ class OrganizationDatabase:
+     """
+     SQLite database with sqlite-vec for organization vector search.
+
+     Uses hybrid text + vector search:
+     1. Text filtering with SQL LIKE to reduce candidates
+     2. sqlite-vec for semantic similarity ranking
+     """
+
+     def __init__(
+         self,
+         db_path: Optional[str | Path] = None,
+         embedding_dim: int = 768,  # Default for embeddinggemma-300m
+     ):
+         """
+         Initialize the organization database.
+
+         Args:
+             db_path: Path to database file (creates if not exists)
+             embedding_dim: Dimension of embeddings to store
+         """
+         self._db_path = Path(db_path) if db_path else DEFAULT_DB_PATH
+         self._embedding_dim = embedding_dim
+         self._conn: Optional[sqlite3.Connection] = None
+
+     def _ensure_dir(self) -> None:
+         """Ensure database directory exists."""
+         self._db_path.parent.mkdir(parents=True, exist_ok=True)
+
+     def _connect(self) -> sqlite3.Connection:
+         """Get or create database connection using shared connection pool."""
+         if self._conn is not None:
+             return self._conn
+
+         self._conn = _get_shared_connection(self._db_path, self._embedding_dim)
+
+         # Create tables (idempotent)
+         self._create_tables()
+
+         return self._conn
+
+     def _create_tables(self) -> None:
+         """Create database tables including sqlite-vec virtual table."""
+         conn = self._conn
+         assert conn is not None
+
+         # Main organization records table
+         conn.execute("""
+             CREATE TABLE IF NOT EXISTS organizations (
+                 id INTEGER PRIMARY KEY AUTOINCREMENT,
+                 name TEXT NOT NULL,
+                 name_normalized TEXT NOT NULL,
+                 source TEXT NOT NULL,
+                 source_id TEXT NOT NULL,
+                 region TEXT NOT NULL DEFAULT '',
+                 entity_type TEXT NOT NULL DEFAULT 'unknown',
+                 from_date TEXT NOT NULL DEFAULT '',
+                 to_date TEXT NOT NULL DEFAULT '',
+                 record TEXT NOT NULL,
+                 UNIQUE(source, source_id)
+             )
+         """)
+
+         # Add region column if it doesn't exist (migration for existing DBs)
+         try:
+             conn.execute("ALTER TABLE organizations ADD COLUMN region TEXT NOT NULL DEFAULT ''")
+             logger.info("Added region column to organizations table")
+         except sqlite3.OperationalError:
+             pass  # Column already exists
+
+         # Add entity_type column if it doesn't exist (migration for existing DBs)
+         try:
+             conn.execute("ALTER TABLE organizations ADD COLUMN entity_type TEXT NOT NULL DEFAULT 'unknown'")
+             logger.info("Added entity_type column to organizations table")
+         except sqlite3.OperationalError:
+             pass  # Column already exists
+
+         # Add from_date column if it doesn't exist (migration for existing DBs)
+         try:
+             conn.execute("ALTER TABLE organizations ADD COLUMN from_date TEXT NOT NULL DEFAULT ''")
+             logger.info("Added from_date column to organizations table")
+         except sqlite3.OperationalError:
+             pass  # Column already exists
+
+         # Add to_date column if it doesn't exist (migration for existing DBs)
+         try:
+             conn.execute("ALTER TABLE organizations ADD COLUMN to_date TEXT NOT NULL DEFAULT ''")
+             logger.info("Added to_date column to organizations table")
+         except sqlite3.OperationalError:
+             pass  # Column already exists
+
+         # Add canon_id column if it doesn't exist (migration for canonicalization)
+         try:
+             conn.execute("ALTER TABLE organizations ADD COLUMN canon_id INTEGER DEFAULT NULL")
+             logger.info("Added canon_id column to organizations table")
+         except sqlite3.OperationalError:
+             pass  # Column already exists
+
+         # Add canon_size column if it doesn't exist (migration for canonicalization)
+         try:
+             conn.execute("ALTER TABLE organizations ADD COLUMN canon_size INTEGER DEFAULT 1")
+             logger.info("Added canon_size column to organizations table")
+         except sqlite3.OperationalError:
+             pass  # Column already exists
+
+         # Create indexes on main table
+         conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_name ON organizations(name)")
+         conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_name_normalized ON organizations(name_normalized)")
+         conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_source ON organizations(source)")
+         conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_source_id ON organizations(source, source_id)")
+         conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_region ON organizations(region)")
+         conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_entity_type ON organizations(entity_type)")
+         conn.execute("CREATE UNIQUE INDEX IF NOT EXISTS idx_orgs_name_region_source ON organizations(name, region, source)")
+         conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_canon_id ON organizations(canon_id)")
+
+         # Create sqlite-vec virtual table for embeddings
+         # vec0 is the recommended virtual table type
+         conn.execute(f"""
+             CREATE VIRTUAL TABLE IF NOT EXISTS organization_embeddings USING vec0(
+                 org_id INTEGER PRIMARY KEY,
+                 embedding float[{self._embedding_dim}]
+             )
+         """)
+
+         conn.commit()
+
+     def close(self) -> None:
+         """Clear connection reference (shared connection remains open)."""
+         self._conn = None
+
+     def insert(self, record: CompanyRecord, embedding: np.ndarray) -> int:
+         """
+         Insert an organization record with its embedding.
+
+         Args:
+             record: Organization record to insert
+             embedding: Embedding vector for the organization name
+
+         Returns:
+             Row ID of inserted record
+         """
+         conn = self._connect()
+
+         # Serialize record
+         record_json = json.dumps(record.record)
+         name_normalized = _normalize_name(record.name)
+
+         cursor = conn.execute("""
+             INSERT OR REPLACE INTO organizations
+             (name, name_normalized, source, source_id, region, entity_type, from_date, to_date, record)
+             VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
+         """, (
+             record.name,
+             name_normalized,
+             record.source,
+             record.source_id,
+             record.region,
+             record.entity_type.value,
+             record.from_date or "",
+             record.to_date or "",
+             record_json,
+         ))
+
+         row_id = cursor.lastrowid
+         assert row_id is not None
+
+         # Insert embedding into vec table
+         # sqlite-vec virtual tables don't support INSERT OR REPLACE, so delete first
+         embedding_blob = embedding.astype(np.float32).tobytes()
+         conn.execute("DELETE FROM organization_embeddings WHERE org_id = ?", (row_id,))
+         conn.execute("""
+             INSERT INTO organization_embeddings (org_id, embedding)
+             VALUES (?, ?)
+         """, (row_id, embedding_blob))
+
+         conn.commit()
+         return row_id
+
+     def insert_batch(
+         self,
+         records: list[CompanyRecord],
+         embeddings: np.ndarray,
+         batch_size: int = 1000,
+     ) -> int:
+         """
+         Insert multiple organization records with embeddings.
+
+         Args:
+             records: List of organization records
+             embeddings: Matrix of embeddings (N x dim)
+             batch_size: Commit batch size
+
+         Returns:
+             Number of records inserted
+         """
+         conn = self._connect()
+         count = 0
+
+         for record, embedding in zip(records, embeddings):
+             record_json = json.dumps(record.record)
+             name_normalized = _normalize_name(record.name)
+
+             cursor = conn.execute("""
+                 INSERT OR REPLACE INTO organizations
+                 (name, name_normalized, source, source_id, region, entity_type, from_date, to_date, record)
+                 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
+             """, (
+                 record.name,
+                 name_normalized,
+                 record.source,
+                 record.source_id,
+                 record.region,
+                 record.entity_type.value,
+                 record.from_date or "",
+                 record.to_date or "",
+                 record_json,
+             ))
+
+             row_id = cursor.lastrowid
+             assert row_id is not None
+
+             # Insert embedding (delete first since sqlite-vec doesn't support REPLACE)
+             embedding_blob = embedding.astype(np.float32).tobytes()
+             conn.execute("DELETE FROM organization_embeddings WHERE org_id = ?", (row_id,))
+             conn.execute("""
+                 INSERT INTO organization_embeddings (org_id, embedding)
+                 VALUES (?, ?)
+             """, (row_id, embedding_blob))
+
+             count += 1
+
+             if count % batch_size == 0:
+                 conn.commit()
+                 logger.info(f"Inserted {count} records...")
+
+         conn.commit()
+         return count
+
+     def search(
+         self,
+         query_embedding: np.ndarray,
+         top_k: int = 20,
+         source_filter: Optional[str] = None,
+         query_text: Optional[str] = None,
+         max_text_candidates: int = 5000,
+         rerank_min_candidates: int = 500,
+     ) -> list[tuple[CompanyRecord, float]]:
+         """
+         Search for similar organizations using hybrid text + vector search.
+
+         Three-stage approach:
+         1. If query_text provided, use SQL LIKE to find candidates containing search terms
+         2. Use sqlite-vec for vector similarity ranking on filtered candidates
+         3. Apply prominence-based re-ranking to boost major companies (SEC filers, tickers)
+
+         Args:
+             query_embedding: Query embedding vector
+             top_k: Number of results to return
+             source_filter: Optional filter by source (gleif, sec_edgar, etc.)
+             query_text: Optional query text for text-based pre-filtering
+             max_text_candidates: Max candidates to keep after text filtering
+             rerank_min_candidates: Minimum candidates to fetch for re-ranking (default 500)
+
+         Returns:
+             List of (CompanyRecord, adjusted_score) tuples sorted by prominence-adjusted score
+         """
+         start = time.time()
+         self._connect()
+
+         # Normalize query embedding
+         query_norm = np.linalg.norm(query_embedding)
+         if query_norm == 0:
+             return []
+         query_normalized = query_embedding / query_norm
+         query_blob = query_normalized.astype(np.float32).tobytes()
+
+         # Stage 1: Text-based pre-filtering (if query_text provided)
+         candidate_ids: Optional[set[int]] = None
+         query_normalized_text = ""
+         if query_text:
+             query_normalized_text = _normalize_name(query_text)
+             if query_normalized_text:
+                 candidate_ids = self._text_filter_candidates(
+                     query_normalized_text,
+                     max_candidates=max_text_candidates,
+                     source_filter=source_filter,
+                 )
+                 logger.info(f"Text filter: {len(candidate_ids)} candidates for '{query_text}'")
+
+         # Stage 2: Vector search - fetch more candidates for re-ranking
+         if candidate_ids is not None and len(candidate_ids) == 0:
+             # No text matches, return empty
+             return []
+
+         # Fetch enough candidates for prominence re-ranking to be effective
+         # Use at least rerank_min_candidates, or all text-filtered candidates if fewer
+         if candidate_ids is not None:
+             fetch_k = min(len(candidate_ids), max(rerank_min_candidates, top_k * 5))
+         else:
+             fetch_k = max(rerank_min_candidates, top_k * 5)
+
+         if candidate_ids is not None:
+             # Search within text-filtered candidates
+             results = self._vector_search_filtered(
+                 query_blob, candidate_ids, fetch_k, source_filter
+             )
+         else:
+             # Full vector search
+             results = self._vector_search_full(query_blob, fetch_k, source_filter)
+
+         # Stage 3: Prominence-based re-ranking
+         if results and query_normalized_text:
+             results = self._apply_prominence_reranking(results, query_normalized_text, top_k)
+         else:
+             # No re-ranking, just trim to top_k
+             results = results[:top_k]
+
+         elapsed = time.time() - start
+         logger.debug(f"Hybrid search took {elapsed:.3f}s (results={len(results)})")
+         return results
+
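# A minimal usage sketch of the hybrid search (illustrative; `embed` is a
# hypothetical stand-in for whatever model produces the 768-dim vectors):
db = get_database()             # singleton backed by ~/.cache/corp-extractor/entities.db
query_vec = embed("Apple Inc")  # hypothetical: returns np.ndarray of shape (768,)
for record, score in db.search(query_vec, top_k=5, query_text="Apple Inc"):
    print(f"{score:.3f}  {record.name}  [{record.source}]")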
+     def _calculate_prominence_boost(
+         self,
+         record: CompanyRecord,
+         query_normalized: str,
+         canon_sources: Optional[set[str]] = None,
+     ) -> float:
+         """
+         Calculate prominence boost for re-ranking search results.
+
+         Boosts scores based on signals that indicate a major/prominent company:
+         - Has ticker symbol (publicly traded)
+         - GLEIF source (has LEI)
+         - SEC source (vetted US filers)
+         - Wikidata source (Wikipedia-notable)
+         - Exact normalized name match
+
+         When canon_sources is provided (from a canonical group), boosts are
+         applied for ALL sources in the canon group, not just this record's source.
+
+         Args:
+             record: The company record to evaluate
+             query_normalized: Normalized query text for exact match check
+             canon_sources: Optional set of sources in this record's canonical group
+
+         Returns:
+             Boost value to add to embedding similarity (0.0 to ~0.23)
+         """
+         boost = 0.0
+
+         # Get all sources to consider (canon group or just this record)
+         sources_to_check = canon_sources or {record.source}
+
+         # Has ticker symbol = publicly traded major company
+         # Check if ANY record in canon group has ticker
+         if record.record.get("ticker") or (canon_sources and "sec_edgar" in canon_sources):
+             boost += 0.08
+
+         # Source-based boosts - accumulate for all sources in canon group
+         if "gleif" in sources_to_check:
+             boost += 0.05  # Has LEI = verified legal entity
+         if "sec_edgar" in sources_to_check:
+             boost += 0.03  # SEC filer
+         if "wikipedia" in sources_to_check:
+             boost += 0.02  # Wikipedia notable
+
+         # Exact normalized name match bonus
+         record_normalized = _normalize_name(record.name)
+         if query_normalized == record_normalized:
+             boost += 0.05
+
+         return boost
+
+     def _apply_prominence_reranking(
+         self,
+         results: list[tuple[CompanyRecord, float]],
+         query_normalized: str,
+         top_k: int,
+         similarity_weight: float = 0.3,
+     ) -> list[tuple[CompanyRecord, float]]:
+         """
+         Apply prominence-based re-ranking to search results with canon group awareness.
+
+         When records have been canonicalized, boosts are applied based on ALL sources
+         in the canonical group, not just the matched record's source.
+
+         Args:
+             results: List of (record, similarity) from vector search
+             query_normalized: Normalized query text
+             top_k: Number of results to return after re-ranking
+             similarity_weight: Weight for similarity score (0-1), lower = prominence matters more
+
+         Returns:
+             Re-ranked list of (record, adjusted_score) tuples
+         """
+         conn = self._conn
+         assert conn is not None
+
+         # Build canon_id -> sources mapping for all results that have canon_id
+         canon_sources_map: dict[int, set[str]] = {}
+         canon_ids = [
+             r.record.get("canon_id")
+             for r, _ in results
+             if r.record.get("canon_id") is not None
+         ]
+
+         if canon_ids:
+             # Fetch all sources for each canon_id in one query
+             unique_canon_ids = list(set(canon_ids))
+             placeholders = ",".join("?" * len(unique_canon_ids))
+             rows = conn.execute(f"""
+                 SELECT canon_id, source
+                 FROM organizations
+                 WHERE canon_id IN ({placeholders})
+             """, unique_canon_ids).fetchall()
+
+             for row in rows:
+                 canon_id = row["canon_id"]
+                 canon_sources_map.setdefault(canon_id, set()).add(row["source"])
+
+         # Calculate boosted scores with canon group awareness
+         # Formula: adjusted = (similarity * weight) + boost
+         # With weight=0.3, a sim=0.65 SEC+ticker (boost=0.11) beats sim=0.75 no-boost
+         boosted_results: list[tuple[CompanyRecord, float, float, float]] = []
+         for record, similarity in results:
+             canon_id = record.record.get("canon_id")
+             # Get all sources in this record's canon group (if any)
+             canon_sources = canon_sources_map.get(canon_id) if canon_id else None
+
+             boost = self._calculate_prominence_boost(record, query_normalized, canon_sources)
+             adjusted_score = (similarity * similarity_weight) + boost
+             boosted_results.append((record, similarity, boost, adjusted_score))
+
+         # Sort by adjusted score (descending)
+         boosted_results.sort(key=lambda x: x[3], reverse=True)
+
+         # Log re-ranking details for top results
+         logger.debug(f"Prominence re-ranking for '{query_normalized}':")
+         for record, sim, boost, adj in boosted_results[:10]:
+             ticker = record.record.get("ticker", "")
+             ticker_str = f" ticker={ticker}" if ticker else ""
+             canon_id = record.record.get("canon_id")
+             canon_str = f" canon={canon_id}" if canon_id else ""
+             logger.debug(
+                 f"  {record.name}: sim={sim:.3f} + boost={boost:.3f} = {adj:.3f} "
+                 f"[{record.source}{ticker_str}{canon_str}]"
+             )
+
+         # Return top_k with adjusted scores
+         return [(r, adj) for r, _, _, adj in boosted_results[:top_k]]
+
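# Working through the re-ranking formula from the comment above: an SEC filer
# with a ticker gets boost 0.08 + 0.03 = 0.11, so with similarity_weight = 0.3
sec_filer = 0.65 * 0.3 + 0.11  # 0.305
plain_hit = 0.75 * 0.3 + 0.00  # 0.225 -> the lower-similarity SEC filer ranks first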
+     def _text_filter_candidates(
+         self,
+         query_normalized: str,
+         max_candidates: int,
+         source_filter: Optional[str] = None,
+     ) -> set[int]:
+         """
+         Filter candidates using SQL LIKE for fast text matching.
+
+         This is a generous pre-filter to reduce the embedding search space.
+         Returns set of organization IDs that contain any search term.
+         Uses `name_normalized` column for consistent matching.
+         """
+         conn = self._conn
+         assert conn is not None
+
+         # Extract search terms from the normalized query
+         search_terms = _extract_search_terms(query_normalized)
+         if not search_terms:
+             return set()
+
+         logger.debug(f"Text filter search terms: {search_terms}")
+
+         # Build OR clause for LIKE matching on any term
+         # Use name_normalized for consistent matching (already lowercased, suffixes removed)
+         like_clauses = []
+         params: list = []
+         for term in search_terms:
+             like_clauses.append("name_normalized LIKE ?")
+             params.append(f"%{term}%")
+
+         where_clause = " OR ".join(like_clauses)
+
+         # Add source filter if specified
+         if source_filter:
+             query = f"""
+                 SELECT id FROM organizations
+                 WHERE ({where_clause}) AND source = ?
+                 LIMIT ?
+             """
+             params.append(source_filter)
+         else:
+             query = f"""
+                 SELECT id FROM organizations
+                 WHERE {where_clause}
+                 LIMIT ?
+             """
+
+         params.append(max_candidates)
+
+         cursor = conn.execute(query, params)
+         return set(row["id"] for row in cursor)
+
+     def _vector_search_filtered(
+         self,
+         query_blob: bytes,
+         candidate_ids: set[int],
+         top_k: int,
+         source_filter: Optional[str],
+     ) -> list[tuple[CompanyRecord, float]]:
+         """Vector search within a filtered set of candidates."""
+         conn = self._conn
+         assert conn is not None
+
+         if not candidate_ids:
+             return []
+
+         # Build IN clause for candidate IDs
+         placeholders = ",".join("?" * len(candidate_ids))
+
+         # KNN query against sqlite-vec, restricted to the candidate IDs.
+         # vec_distance_cosine returns cosine distance: lower = more similar.
+         query = f"""
+             SELECT
+                 e.org_id,
+                 vec_distance_cosine(e.embedding, ?) as distance
+             FROM organization_embeddings e
+             WHERE e.org_id IN ({placeholders})
+             ORDER BY distance
+             LIMIT ?
+         """
+
+         cursor = conn.execute(query, [query_blob] + list(candidate_ids) + [top_k])
+
+         results = []
+         for row in cursor:
+             org_id = row["org_id"]
+             distance = row["distance"]
+             # Convert cosine distance to similarity (1 - distance)
+             similarity = 1.0 - distance
+
+             # Fetch full record
+             record = self._get_record_by_id(org_id)
+             if record:
+                 # Apply source filter if specified
+                 if source_filter and record.source != source_filter:
+                     continue
+                 results.append((record, similarity))
+
+         return results
+
+     def _vector_search_full(
+         self,
+         query_blob: bytes,
+         top_k: int,
+         source_filter: Optional[str],
+     ) -> list[tuple[CompanyRecord, float]]:
+         """Full vector search without text pre-filtering."""
+         conn = self._conn
+         assert conn is not None
+
+         # KNN search with sqlite-vec
+         if source_filter:
+             # Need to join with organizations table for source filter
+             query = """
+                 SELECT
+                     e.org_id,
+                     vec_distance_cosine(e.embedding, ?) as distance
+                 FROM organization_embeddings e
+                 JOIN organizations c ON e.org_id = c.id
+                 WHERE c.source = ?
+                 ORDER BY distance
+                 LIMIT ?
+             """
+             cursor = conn.execute(query, (query_blob, source_filter, top_k))
+         else:
+             query = """
+                 SELECT
+                     org_id,
+                     vec_distance_cosine(embedding, ?) as distance
+                 FROM organization_embeddings
+                 ORDER BY distance
+                 LIMIT ?
+             """
+             cursor = conn.execute(query, (query_blob, top_k))
+
+         results = []
+         for row in cursor:
+             org_id = row["org_id"]
+             distance = row["distance"]
+             similarity = 1.0 - distance
+
+             record = self._get_record_by_id(org_id)
+             if record:
+                 results.append((record, similarity))
+
+         return results
+
+     def _get_record_by_id(self, org_id: int) -> Optional[CompanyRecord]:
+         """Get an organization record by ID, including db_id and canon_id in record dict."""
+         conn = self._conn
+         assert conn is not None
+
+         cursor = conn.execute("""
+             SELECT id, name, source, source_id, region, entity_type, record, canon_id
+             FROM organizations WHERE id = ?
+         """, (org_id,))
+
+         row = cursor.fetchone()
+         if row:
+             record_data = json.loads(row["record"])
+             # Add db_id and canon_id to record dict for canon-aware search
+             record_data["db_id"] = row["id"]
+             record_data["canon_id"] = row["canon_id"]
+             return CompanyRecord(
+                 name=row["name"],
+                 source=row["source"],
+                 source_id=row["source_id"],
+                 region=row["region"] or "",
+                 entity_type=EntityType(row["entity_type"]) if row["entity_type"] else EntityType.UNKNOWN,
+                 record=record_data,
+             )
+         return None
+
+     def get_by_source_id(self, source: str, source_id: str) -> Optional[CompanyRecord]:
+         """Get an organization record by source and source_id."""
+         conn = self._connect()
+
+         cursor = conn.execute("""
+             SELECT name, source, source_id, region, entity_type, record
+             FROM organizations
+             WHERE source = ? AND source_id = ?
+         """, (source, source_id))
+
+         row = cursor.fetchone()
+         if row:
+             return CompanyRecord(
+                 name=row["name"],
+                 source=row["source"],
+                 source_id=row["source_id"],
+                 region=row["region"] or "",
+                 entity_type=EntityType(row["entity_type"]) if row["entity_type"] else EntityType.UNKNOWN,
+                 record=json.loads(row["record"]),
+             )
+         return None
+
+     def get_id_by_source_id(self, source: str, source_id: str) -> Optional[int]:
+         """Get the internal database ID for an organization by source and source_id."""
+         conn = self._connect()
+
+         cursor = conn.execute("""
+             SELECT id FROM organizations
+             WHERE source = ? AND source_id = ?
+         """, (source, source_id))
+
+         row = cursor.fetchone()
+         if row:
+             return row["id"]
+         return None
+
+     def get_stats(self) -> DatabaseStats:
+         """Get database statistics."""
+         conn = self._connect()
+
+         # Total count
+         cursor = conn.execute("SELECT COUNT(*) FROM organizations")
+         total = cursor.fetchone()[0]
+
+         # Count by source
+         cursor = conn.execute("SELECT source, COUNT(*) as cnt FROM organizations GROUP BY source")
+         by_source = {row["source"]: row["cnt"] for row in cursor}
+
+         # Database file size
+         db_size = self._db_path.stat().st_size if self._db_path.exists() else 0
+
+         return DatabaseStats(
+             total_records=total,
+             by_source=by_source,
+             embedding_dimension=self._embedding_dim,
+             database_size_bytes=db_size,
+         )
+
+     def get_all_source_ids(self, source: Optional[str] = None) -> set[str]:
+         """
+         Get all source_ids from the organizations table.
+
+         Useful for resume operations to skip already-imported records.
+
+         Args:
+             source: Optional source filter (e.g., "wikipedia" for Wikidata orgs)
+
+         Returns:
+             Set of source_id strings (e.g., Q codes for Wikidata)
+         """
+         conn = self._connect()
+
+         if source:
+             cursor = conn.execute(
+                 "SELECT DISTINCT source_id FROM organizations WHERE source = ?",
+                 (source,)
+             )
+         else:
+             cursor = conn.execute("SELECT DISTINCT source_id FROM organizations")
+
+         return {row[0] for row in cursor}
+
+     def iter_records(self, source: Optional[str] = None) -> Iterator[CompanyRecord]:
+         """Iterate over all records, optionally filtered by source."""
+         conn = self._connect()
+
+         if source:
+             cursor = conn.execute("""
+                 SELECT name, source, source_id, region, entity_type, record
+                 FROM organizations
+                 WHERE source = ?
+             """, (source,))
+         else:
+             cursor = conn.execute("""
+                 SELECT name, source, source_id, region, entity_type, record
+                 FROM organizations
+             """)
+
+         for row in cursor:
+             yield CompanyRecord(
+                 name=row["name"],
+                 source=row["source"],
+                 source_id=row["source_id"],
+                 region=row["region"] or "",
+                 entity_type=EntityType(row["entity_type"]) if row["entity_type"] else EntityType.UNKNOWN,
+                 record=json.loads(row["record"]),
+             )
+
+     def canonicalize(self, batch_size: int = 10000) -> dict[str, int]:
+         """
+         Canonicalize all organizations by linking equivalent records.
+
+         Records are considered equivalent if they match by:
+         1. Same LEI (GLEIF source_id or Wikidata P1278) - globally unique, no region check
+         2. Same ticker symbol - globally unique, no region check
+         3. Same CIK - globally unique, no region check
+         4. Same normalized name AND same normalized region
+         5. Name match with suffix expansion AND same region
+
+         Region normalization uses pycountry to handle:
+         - Country codes/names (GB, United Kingdom, Great Britain -> GB)
+         - US state codes/names (CA, California -> US)
+         - Common aliases (UK -> GB, USA -> US)
+
+         For each group of equivalent records, the highest-priority source
+         (gleif > sec_edgar > companies_house > wikipedia) becomes canonical.
+
+         Args:
+             batch_size: Commit batch size for updates
+
+         Returns:
+             Dict with stats: total_records, groups_found, records_updated
+         """
+         conn = self._connect()
+         logger.info("Starting canonicalization...")
+
+         # Phase 1: Load all organization data and build indexes
+         logger.info("Phase 1: Building indexes...")
+
+         lei_index: dict[str, list[int]] = {}
+         ticker_index: dict[str, list[int]] = {}
+         cik_index: dict[str, list[int]] = {}
+         # Name indexes now keyed by (normalized_name, normalized_region)
+         # Region-less matching only applies for identifier-based matching
+         name_region_index: dict[tuple[str, str], list[int]] = {}
+         expanded_name_region_index: dict[tuple[str, str], list[int]] = {}
+
+         sources: dict[int, str] = {}  # org_id -> source
+         all_org_ids: list[int] = []
+
+         cursor = conn.execute("""
+             SELECT id, source, source_id, name, region, record
+             FROM organizations
+         """)
+
+         count = 0
+         for row in cursor:
+             org_id = row["id"]
+             source = row["source"]
+             name = row["name"]
+             region = row["region"] or ""
+             record = json.loads(row["record"])
+
+             all_org_ids.append(org_id)
+             sources[org_id] = source
+
+             # Index by LEI (GLEIF source_id or Wikidata's P1278)
+             # LEI is globally unique - no region check needed
+             if source == "gleif":
+                 lei = row["source_id"]
+             else:
+                 lei = record.get("lei")
+             if lei:
+                 lei_index.setdefault(lei.upper(), []).append(org_id)
+
+             # Index by ticker - globally unique, no region check
+             ticker = record.get("ticker")
+             if ticker:
+                 ticker_index.setdefault(ticker.upper(), []).append(org_id)
+
+             # Index by CIK - globally unique, no region check
+             if source == "sec_edgar":
+                 cik = row["source_id"]
+             else:
+                 cik = record.get("cik")
+             if cik:
+                 cik_index.setdefault(str(cik), []).append(org_id)
+
+             # Index by (normalized_name, normalized_region)
+             # Same name in different regions = different legal entities
+             norm_name = _normalize_for_canon(name)
+             norm_region = _normalize_region(region)
+             if norm_name:
+                 key = (norm_name, norm_region)
+                 name_region_index.setdefault(key, []).append(org_id)
+
+             # Index by (expanded_name, normalized_region)
+             expanded_name = _expand_suffix(name)
+             if expanded_name and expanded_name != norm_name:
+                 key = (expanded_name, norm_region)
+                 expanded_name_region_index.setdefault(key, []).append(org_id)
+
+             count += 1
+             if count % 100000 == 0:
+                 logger.info(f"  Indexed {count} organizations...")
+
+         logger.info(f"  Indexed {count} organizations total")
+         logger.info(f"  LEI index: {len(lei_index)} unique LEIs")
+         logger.info(f"  Ticker index: {len(ticker_index)} unique tickers")
+         logger.info(f"  CIK index: {len(cik_index)} unique CIKs")
+         logger.info(f"  Name+region index: {len(name_region_index)} unique (name, region) pairs")
+         logger.info(f"  Expanded name+region index: {len(expanded_name_region_index)} unique pairs")
+
+         # Phase 2: Build equivalence groups using Union-Find
+         logger.info("Phase 2: Building equivalence groups...")
+
+         uf = UnionFind(all_org_ids)
+
+         # Merge by LEI (globally unique identifier)
+         for _lei, ids in lei_index.items():
+             for i in range(1, len(ids)):
+                 uf.union(ids[0], ids[i])
+
+         # Merge by ticker (globally unique identifier)
+         for _ticker, ids in ticker_index.items():
+             for i in range(1, len(ids)):
+                 uf.union(ids[0], ids[i])
+
+         # Merge by CIK (globally unique identifier)
+         for _cik, ids in cik_index.items():
+             for i in range(1, len(ids)):
+                 uf.union(ids[0], ids[i])
+
+         # Merge by (normalized_name, normalized_region)
+         for _name_region, ids in name_region_index.items():
+             for i in range(1, len(ids)):
+                 uf.union(ids[0], ids[i])
+
+         # Merge by (expanded_name, normalized_region)
+         # This connects "Amazon Ltd" with "Amazon Limited" in same region
+         for key, expanded_ids in expanded_name_region_index.items():
+             # Find org_ids with the expanded form as their normalized name in same region
+             if key in name_region_index:
+                 # Link first expanded_id to first name_id
+                 uf.union(expanded_ids[0], name_region_index[key][0])
+
+         groups = uf.groups()
+         logger.info(f"  Found {len(groups)} equivalence groups")
+
+         # Count groups with multiple records
+         multi_record_groups = sum(1 for ids in groups.values() if len(ids) > 1)
+         logger.info(f"  Groups with multiple records: {multi_record_groups}")
+
+         # Phase 3: Select canonical record for each group and update database
+         logger.info("Phase 3: Updating database...")
+
+         updated_count = 0
+         batch_updates: list[tuple[int, int, int]] = []  # (org_id, canon_id, canon_size)
+
+         for _root, group_ids in groups.items():
+             if len(group_ids) == 1:
+                 # Single record - canonical to itself
+                 batch_updates.append((group_ids[0], group_ids[0], 1))
+             else:
+                 # Multiple records - find highest priority source
+                 best_id = min(
+                     group_ids,
+                     key=lambda oid: (SOURCE_PRIORITY.get(sources[oid], 99), oid)
+                 )
+                 group_size = len(group_ids)
+
+                 # All records in group point to the best one
+                 for oid in group_ids:
+                     # canon_size is only set on the canonical record
+                     size = group_size if oid == best_id else 1
+                     batch_updates.append((oid, best_id, size))
+
+             # Commit batch
+             if len(batch_updates) >= batch_size:
+                 self._apply_canon_updates(batch_updates)
+                 updated_count += len(batch_updates)
+                 logger.info(f"  Updated {updated_count} records...")
+                 batch_updates = []
+
+         # Final batch
+         if batch_updates:
+             self._apply_canon_updates(batch_updates)
+             updated_count += len(batch_updates)
+
+         conn.commit()
+         logger.info(f"Canonicalization complete: {updated_count} records updated, {multi_record_groups} multi-record groups")
+
+         return {
+             "total_records": count,
+             "groups_found": len(groups),
+             "multi_record_groups": multi_record_groups,
+             "records_updated": updated_count,
+         }
+
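# Illustrative check of matching rule 5 above: "Amazon Ltd" and "Amazon Limited"
# normalize to different strings, but suffix expansion maps both to
# "amazon limited", so the expanded-name index unions them into one group:
assert _expand_suffix("Amazon Ltd") == _expand_suffix("Amazon Limited") == "amazon limited"
assert _names_match_for_canon("Amazon Ltd", "Amazon Limited")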
1404
+ def _apply_canon_updates(self, updates: list[tuple[int, int, int]]) -> None:
1405
+ """Apply batch of canon updates: (org_id, canon_id, canon_size)."""
1406
+ conn = self._conn
1407
+ assert conn is not None
1408
+
1409
+ for org_id, canon_id, canon_size in updates:
1410
+ conn.execute(
1411
+ "UPDATE organizations SET canon_id = ?, canon_size = ? WHERE id = ?",
1412
+ (canon_id, canon_size, org_id)
1413
+ )
1414
+
1415
+ conn.commit()
1416
+
1417
+ def get_canon_stats(self) -> dict[str, int]:
1418
+ """Get statistics about canonicalization status."""
1419
+ conn = self._connect()
1420
+
1421
+ # Total records
1422
+ cursor = conn.execute("SELECT COUNT(*) FROM organizations")
1423
+ total = cursor.fetchone()[0]
1424
+
1425
+ # Records with canon_id set
1426
+ cursor = conn.execute("SELECT COUNT(*) FROM organizations WHERE canon_id IS NOT NULL")
1427
+ canonicalized = cursor.fetchone()[0]
1428
+
1429
+ # Number of canonical groups (unique canon_ids)
1430
+ cursor = conn.execute("SELECT COUNT(DISTINCT canon_id) FROM organizations WHERE canon_id IS NOT NULL")
1431
+ groups = cursor.fetchone()[0]
1432
+
1433
+ # Multi-record groups (canon_size > 1)
1434
+ cursor = conn.execute("SELECT COUNT(*) FROM organizations WHERE canon_size > 1")
1435
+ multi_record_groups = cursor.fetchone()[0]
1436
+
1437
+ # Records in multi-record groups
1438
+ cursor = conn.execute("""
1439
+ SELECT COUNT(*) FROM organizations o1
1440
+ WHERE EXISTS (SELECT 1 FROM organizations o2 WHERE o2.id = o1.canon_id AND o2.canon_size > 1)
1441
+ """)
1442
+ records_in_multi = cursor.fetchone()[0]
1443
+
1444
+ return {
1445
+ "total_records": total,
1446
+ "canonicalized_records": canonicalized,
1447
+ "canonical_groups": groups,
1448
+ "multi_record_groups": multi_record_groups,
1449
+ "records_in_multi_groups": records_in_multi,
1450
+ }
1451
+
1452
+ def migrate_name_normalized(self, batch_size: int = 50000) -> int:
1453
+ """
1454
+ Populate the name_normalized column for all records.
1455
+
1456
+ This is a one-time migration for databases that don't have
1457
+ normalized names populated.
1458
+
1459
+ Args:
1460
+ batch_size: Number of records to process per batch
1461
+
1462
+ Returns:
1463
+ Number of records updated
1464
+ """
1465
+ conn = self._connect()
1466
+
1467
+ # Check how many need migration (empty, null, or placeholder "-")
1468
+ cursor = conn.execute(
1469
+ "SELECT COUNT(*) FROM organizations WHERE name_normalized = '' OR name_normalized IS NULL OR name_normalized = '-'"
1470
+ )
1471
+ empty_count = cursor.fetchone()[0]
1472
+
1473
+ if empty_count == 0:
1474
+ logger.info("All records already have name_normalized populated")
1475
+ return 0
1476
+
1477
+ logger.info(f"Populating name_normalized for {empty_count} records...")
1478
+
1479
+ updated = 0
1480
+ last_id = 0
1481
+
1482
+ while True:
1483
+ # Get batch of records that need normalization, ordered by ID
1484
+ cursor = conn.execute("""
1485
+ SELECT id, name FROM organizations
1486
+ WHERE id > ? AND (name_normalized = '' OR name_normalized IS NULL OR name_normalized = '-')
1487
+ ORDER BY id
1488
+ LIMIT ?
1489
+ """, (last_id, batch_size))
1490
+
1491
+ rows = cursor.fetchall()
1492
+ if not rows:
1493
+ break
1494
+
1495
+ # Update each record
1496
+ for row in rows:
1497
+ # _normalize_name now always returns non-empty for valid input
1498
+ normalized = _normalize_name(row["name"])
1499
+ conn.execute(
1500
+ "UPDATE organizations SET name_normalized = ? WHERE id = ?",
1501
+ (normalized, row["id"])
1502
+ )
1503
+ last_id = row["id"]
1504
+
1505
+ conn.commit()
1506
+ updated += len(rows)
1507
+ logger.info(f" Updated {updated}/{empty_count} records...")
1508
+
1509
+ logger.info(f"Migration complete: {updated} name_normalized values populated")
1510
+ return updated
1511
+
1512
+ def migrate_to_sqlite_vec(self, batch_size: int = 10000) -> int:
1513
+ """
1514
+ Migrate embeddings from BLOB column to sqlite-vec virtual table.
1515
+
1516
+ This is a one-time migration for databases created before sqlite-vec support.
1517
+
1518
+ Args:
1519
+ batch_size: Number of records to process per batch
1520
+
1521
+ Returns:
1522
+ Number of embeddings migrated
1523
+ """
1524
+ conn = self._connect()
1525
+
1526
+ # Check if migration is needed
1527
+ cursor = conn.execute("SELECT COUNT(*) FROM organization_embeddings")
1528
+ vec_count = cursor.fetchone()[0]
1529
+
1530
+ cursor = conn.execute("SELECT COUNT(*) FROM organizations WHERE embedding IS NOT NULL")
1531
+ blob_count = cursor.fetchone()[0]
1532
+
1533
+ if vec_count >= blob_count:
1534
+ logger.info(f"Migration not needed: sqlite-vec has {vec_count} embeddings, BLOB has {blob_count}")
1535
+ return 0
1536
+
1537
+ logger.info(f"Migrating {blob_count} embeddings from BLOB to sqlite-vec...")
1538
+
1539
+ # Get rows that still need migration: a BLOB embedding exists in organizations but no row in organization_embeddings yet
1540
+ cursor = conn.execute("""
1541
+ SELECT c.id, c.embedding
1542
+ FROM organizations c
1543
+ LEFT JOIN organization_embeddings e ON c.id = e.org_id
1544
+ WHERE c.embedding IS NOT NULL AND e.org_id IS NULL
1545
+ """)
1546
+
1547
+ migrated = 0
1548
+ batch = []
1549
+
1550
+ for row in cursor:
1551
+ org_id = row["id"]
1552
+ embedding_blob = row["embedding"]
1553
+
1554
+ if embedding_blob:
1555
+ batch.append((org_id, embedding_blob))
1556
+
1557
+ if len(batch) >= batch_size:
1558
+ self._insert_vec_batch(batch)
1559
+ migrated += len(batch)
1560
+ logger.info(f" Migrated {migrated}/{blob_count} embeddings...")
1561
+ batch = []
1562
+
1563
+ # Insert remaining batch
1564
+ if batch:
1565
+ self._insert_vec_batch(batch)
1566
+ migrated += len(batch)
1567
+
1568
+ logger.info(f"Migration complete: {migrated} embeddings migrated to sqlite-vec")
1569
+ return migrated
1570
+
1571
+ def _insert_vec_batch(self, batch: list[tuple[int, bytes]]) -> None:
1572
+ """Insert a batch of embeddings into sqlite-vec table."""
1573
+ conn = self._conn
1574
+ assert conn is not None
1575
+
1576
+ for org_id, embedding_blob in batch:
1577
+ conn.execute("DELETE FROM organization_embeddings WHERE org_id = ?", (org_id,))
1578
+ conn.execute("""
1579
+ INSERT INTO organization_embeddings (org_id, embedding)
1580
+ VALUES (?, ?)
1581
+ """, (org_id, embedding_blob))
1582
+
1583
+ conn.commit()
1584
+
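Both write paths above bind embeddings as raw float32 bytes, which is the layout sqlite-vec expects. A minimal round-trip sketch (numpy only; 768 is the default dimension used in this module):

    import numpy as np

    vec = np.random.rand(768).astype(np.float32)
    blob = vec.tobytes()                              # what gets bound to the ? placeholder
    restored = np.frombuffer(blob, dtype=np.float32)  # lossless round-trip
    assert np.array_equal(vec, restored)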
1585
+ def delete_source(self, source: str) -> int:
1586
+ """Delete all records from a specific source."""
1587
+ conn = self._connect()
1588
+
1589
+ # First get IDs to delete from vec table
1590
+ cursor = conn.execute("SELECT id FROM organizations WHERE source = ?", (source,))
1591
+ ids_to_delete = [row["id"] for row in cursor]
1592
+
1593
+ # Delete from vec table
1594
+ if ids_to_delete:
1595
+ placeholders = ",".join("?" * len(ids_to_delete))
1596
+ conn.execute(f"DELETE FROM organization_embeddings WHERE org_id IN ({placeholders})", ids_to_delete)
1597
+
1598
+ # Delete from main table
1599
+ cursor = conn.execute("DELETE FROM organizations WHERE source = ?", (source,))
1600
+ deleted = cursor.rowcount
1601
+
1602
+ conn.commit()
1603
+
1604
+ logger.info(f"Deleted {deleted} records from source '{source}'")
1605
+ return deleted
1606
+
1607
+ def migrate_from_legacy_schema(self) -> dict[str, str]:
1608
+ """
1609
+ Migrate database from legacy schema (companies/company_embeddings tables)
1610
+ to new schema (organizations/organization_embeddings tables).
1611
+
1612
+ This handles:
1613
+ - Renaming 'companies' table to 'organizations'
1614
+ - Renaming 'company_embeddings' table to 'organization_embeddings'
1615
+ - Renaming 'company_id' column to 'org_id' in embeddings table
1616
+ - Updating indexes to use new naming
1617
+
1618
+ Returns:
1619
+ Dict of migrations performed (table_name -> action)
1620
+ """
1621
+ conn = self._connect()
1622
+ migrations = {}
1623
+
1624
+ # Check what tables exist
1625
+ cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
1626
+ existing_tables = {row[0] for row in cursor}
1627
+
1628
+ has_companies = "companies" in existing_tables
1629
+ has_organizations = "organizations" in existing_tables
1630
+ has_company_embeddings = "company_embeddings" in existing_tables
1631
+ has_org_embeddings = "organization_embeddings" in existing_tables
1632
+
1633
+ if not has_companies and not has_company_embeddings:
1634
+ if has_organizations and has_org_embeddings:
1635
+ logger.info("Database already uses new schema, no migration needed")
1636
+ return {}
1637
+ else:
1638
+ logger.info("No legacy tables found, database will use new schema")
1639
+ return {}
1640
+
1641
+ logger.info("Migrating database from legacy schema...")
1642
+ conn.execute("BEGIN")
1643
+
1644
+ try:
1645
+ # Migrate companies -> organizations
1646
+ if has_companies:
1647
+ if has_organizations:
1648
+ # Both exist - merge data from companies into organizations
1649
+ logger.info("Merging companies table into organizations...")
1650
+ conn.execute("""
1651
+ INSERT OR IGNORE INTO organizations
1652
+ (name, name_normalized, source, source_id, region, entity_type, record)
1653
+ SELECT name, name_normalized, source, source_id,
1654
+ COALESCE(region, ''), COALESCE(entity_type, 'unknown'), record
1655
+ FROM companies
1656
+ """)
1657
+ conn.execute("DROP TABLE companies")
1658
+ migrations["companies"] = "merged_into_organizations"
1659
+ else:
1660
+ # Just rename
1661
+ logger.info("Renaming companies table to organizations...")
1662
+ conn.execute("ALTER TABLE companies RENAME TO organizations")
1663
+ migrations["companies"] = "renamed_to_organizations"
1664
+
1665
+ # Update indexes
1666
+ for old_idx in ["idx_companies_name", "idx_companies_name_normalized",
1667
+ "idx_companies_source", "idx_companies_source_id",
1668
+ "idx_companies_region", "idx_companies_entity_type",
1669
+ "idx_companies_name_region_source"]:
1670
+ try:
1671
+ conn.execute(f"DROP INDEX IF EXISTS {old_idx}")
1672
+ except Exception:
1673
+ pass
1674
+
1675
+ # Create new indexes
1676
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_name ON organizations(name)")
1677
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_name_normalized ON organizations(name_normalized)")
1678
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_source ON organizations(source)")
1679
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_source_id ON organizations(source, source_id)")
1680
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_region ON organizations(region)")
1681
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_orgs_entity_type ON organizations(entity_type)")
1682
+ conn.execute("CREATE UNIQUE INDEX IF NOT EXISTS idx_orgs_name_region_source ON organizations(name, region, source)")
1683
+
1684
+ # Migrate company_embeddings -> organization_embeddings
1685
+ if has_company_embeddings:
1686
+ if has_org_embeddings:
1687
+ # Both exist - merge
1688
+ logger.info("Merging company_embeddings into organization_embeddings...")
1689
+ # Get column info to check for company_id vs org_id
1690
+ cursor = conn.execute("PRAGMA table_info(company_embeddings)")
1691
+ cols = {row[1] for row in cursor}
1692
+ id_col = "company_id" if "company_id" in cols else "org_id"
1693
+
1694
+ conn.execute(f"""
1695
+ INSERT OR IGNORE INTO organization_embeddings (org_id, embedding)
1696
+ SELECT {id_col}, embedding FROM company_embeddings
1697
+ """)
1698
+ conn.execute("DROP TABLE company_embeddings")
1699
+ migrations["company_embeddings"] = "merged_into_organization_embeddings"
1700
+ else:
1701
+ # Need to recreate with new column name
1702
+ logger.info("Migrating company_embeddings to organization_embeddings...")
1703
+
1704
+ # Check if it has company_id or org_id column
1705
+ cursor = conn.execute("PRAGMA table_info(company_embeddings)")
1706
+ cols = {row[1] for row in cursor}
1707
+ id_col = "company_id" if "company_id" in cols else "org_id"
1708
+
1709
+ # Create new virtual table
1710
+ conn.execute(f"""
1711
+ CREATE VIRTUAL TABLE organization_embeddings USING vec0(
1712
+ org_id INTEGER PRIMARY KEY,
1713
+ embedding float[{self._embedding_dim}]
1714
+ )
1715
+ """)
1716
+
1717
+ # Copy data
1718
+ conn.execute(f"""
1719
+ INSERT INTO organization_embeddings (org_id, embedding)
1720
+ SELECT {id_col}, embedding FROM company_embeddings
1721
+ """)
1722
+
1723
+ # Drop old table
1724
+ conn.execute("DROP TABLE company_embeddings")
1725
+ migrations["company_embeddings"] = "renamed_to_organization_embeddings"
1726
+
1727
+ conn.execute("COMMIT")
1728
+ logger.info(f"Migration complete: {migrations}")
1729
+
1730
+ except Exception as e:
1731
+ conn.execute("ROLLBACK")
1732
+ logger.error(f"Migration failed: {e}")
1733
+ raise
1734
+
1735
+ # Vacuum to clean up - outside try block since COMMIT already succeeded
1736
+ try:
1737
+ conn.execute("VACUUM")
1738
+ except Exception as e:
1739
+ logger.warning(f"VACUUM failed (migration was successful): {e}")
1740
+
1741
+ return migrations
1742
+
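A plausible one-time upgrade sequence for an old database, assuming `db` is an instance of this store class; the ordering is an assumption, but each method is defined in this diff:

    migrations = db.migrate_from_legacy_schema()  # companies -> organizations, etc.
    if migrations:
        db.migrate_name_normalized()              # backfill normalized names
        db.migrate_to_sqlite_vec()                # copy BLOB embeddings into the vec0 table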
1743
+ def get_missing_embedding_count(self) -> int:
1744
+ """Get count of organizations without embeddings in organization_embeddings table."""
1745
+ conn = self._connect()
1746
+
1747
+ cursor = conn.execute("""
1748
+ SELECT COUNT(*) FROM organizations c
1749
+ LEFT JOIN organization_embeddings e ON c.id = e.org_id
1750
+ WHERE e.org_id IS NULL
1751
+ """)
1752
+ return cursor.fetchone()[0]
1753
+
1754
+ def get_organizations_without_embeddings(
1755
+ self,
1756
+ batch_size: int = 1000,
1757
+ source: Optional[str] = None,
1758
+ ) -> Iterator[tuple[int, str]]:
1759
+ """
1760
+ Iterate over organizations that don't have embeddings.
1761
+
1762
+ Args:
1763
+ batch_size: Number of records per batch
1764
+ source: Optional source filter
1765
+
1766
+ Yields:
1767
+ Tuples of (org_id, name)
1768
+ """
1769
+ conn = self._connect()
1770
+
1771
+ last_id = 0
1772
+ while True:
1773
+ if source:
1774
+ cursor = conn.execute("""
1775
+ SELECT c.id, c.name FROM organizations c
1776
+ LEFT JOIN organization_embeddings e ON c.id = e.org_id
1777
+ WHERE e.org_id IS NULL AND c.id > ? AND c.source = ?
1778
+ ORDER BY c.id
1779
+ LIMIT ?
1780
+ """, (last_id, source, batch_size))
1781
+ else:
1782
+ cursor = conn.execute("""
1783
+ SELECT c.id, c.name FROM organizations c
1784
+ LEFT JOIN organization_embeddings e ON c.id = e.org_id
1785
+ WHERE e.org_id IS NULL AND c.id > ?
1786
+ ORDER BY c.id
1787
+ LIMIT ?
1788
+ """, (last_id, batch_size))
1789
+
1790
+ rows = cursor.fetchall()
1791
+ if not rows:
1792
+ break
1793
+
1794
+ for row in rows:
1795
+ yield (row[0], row[1])
1796
+ last_id = row[0]
1797
+
1798
+ def insert_embeddings_batch(
1799
+ self,
1800
+ org_ids: list[int],
1801
+ embeddings: np.ndarray,
1802
+ ) -> int:
1803
+ """
1804
+ Insert embeddings for existing organizations.
1805
+
1806
+ Args:
1807
+ org_ids: List of organization IDs
1808
+ embeddings: Matrix of embeddings (N x dim)
1809
+
1810
+ Returns:
1811
+ Number of embeddings inserted
1812
+ """
1813
+ conn = self._connect()
1814
+ count = 0
1815
+
1816
+ for org_id, embedding in zip(org_ids, embeddings):
1817
+ embedding_blob = embedding.astype(np.float32).tobytes()
1818
+ conn.execute("DELETE FROM organization_embeddings WHERE org_id = ?", (org_id,))
1819
+ conn.execute("""
1820
+ INSERT INTO organization_embeddings (org_id, embedding)
1821
+ VALUES (?, ?)
1822
+ """, (org_id, embedding_blob))
1823
+ count += 1
1824
+
1825
+ conn.commit()
1826
+ return count
1827
+
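The two methods above pair naturally for backfilling: iterate the rows that lack embeddings, embed their names, and write the vectors back. A sketch, where `embed_texts` is a hypothetical stand-in for whatever model returns an (N x dim) float array:

    import numpy as np

    def backfill_embeddings(db, embed_texts, batch_size: int = 1000) -> int:
        total, ids, names = 0, [], []
        for org_id, name in db.get_organizations_without_embeddings(batch_size=batch_size):
            ids.append(org_id)
            names.append(name)
            if len(ids) == batch_size:
                total += db.insert_embeddings_batch(ids, np.asarray(embed_texts(names)))
                ids, names = [], []
        if ids:
            total += db.insert_embeddings_batch(ids, np.asarray(embed_texts(names)))
        return total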
1828
+ def resolve_qid_labels(
1829
+ self,
1830
+ label_map: dict[str, str],
1831
+ batch_size: int = 1000,
1832
+ ) -> int:
1833
+ """
1834
+ Update organization records that have QIDs instead of labels in region field.
1835
+
1836
+ Args:
1837
+ label_map: Mapping of QID -> label for resolution
1838
+ batch_size: Commit batch size
1839
+
1840
+ Returns:
1841
+ Number of records updated
1842
+ """
1843
+ conn = self._connect()
1844
+
1845
+ # Find records that look like QIDs in the region field ('Q' followed by a digit; the GLOB only pins the first character after 'Q')
1846
+ region_updates = 0
1847
+ cursor = conn.execute("""
1848
+ SELECT id, region FROM organizations
1849
+ WHERE region LIKE 'Q%' AND region GLOB 'Q[0-9]*'
1850
+ """)
1851
+ rows = cursor.fetchall()
1852
+
1853
+ for row in rows:
1854
+ org_id = row["id"]
1855
+ qid = row["region"]
1856
+ if qid in label_map:
1857
+ conn.execute(
1858
+ "UPDATE organizations SET region = ? WHERE id = ?",
1859
+ (label_map[qid], org_id)
1860
+ )
1861
+ region_updates += 1
1862
+
1863
+ if region_updates % batch_size == 0:
1864
+ conn.commit()
1865
+ logger.info(f"Updated {region_updates} organization region labels...")
1866
+
1867
+ conn.commit()
1868
+ logger.info(f"Resolved QID labels: {region_updates} organization regions")
1869
+ return region_updates
1870
+
1871
+ def get_unresolved_qids(self) -> set[str]:
1872
+ """
1873
+ Get all QIDs that still need resolution in the organizations table.
1874
+
1875
+ Returns:
1876
+ Set of QIDs (starting with 'Q') found in region field
1877
+ """
1878
+ conn = self._connect()
1879
+ qids: set[str] = set()
1880
+
1881
+ cursor = conn.execute("""
1882
+ SELECT DISTINCT region FROM organizations
1883
+ WHERE region LIKE 'Q%' AND region GLOB 'Q[0-9]*'
1884
+ """)
1885
+ for row in cursor:
1886
+ qids.add(row["region"])
1887
+
1888
+ return qids
1889
+
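Note that the SQL filter (LIKE 'Q%' plus GLOB 'Q[0-9]*') only constrains the first character after the 'Q', while the person-side code later in this file applies the stricter `value[1:].isdigit()` check. A strict helper matching that intent (hypothetical, not part of this diff):

    def _looks_like_qid(value: str) -> bool:
        # 'Q' followed only by digits (e.g. 'Q30'); the SQL GLOB above
        # is looser and would also match strings like 'Q1abc'.
        return len(value) > 1 and value[0] == "Q" and value[1:].isdigit()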
1890
+
1891
+ def get_person_database(db_path: Optional[str | Path] = None, embedding_dim: int = 768) -> "PersonDatabase":
1892
+ """
1893
+ Get a singleton PersonDatabase instance for the given path.
1894
+
1895
+ Args:
1896
+ db_path: Path to database file
1897
+ embedding_dim: Dimension of embeddings
1898
+
1899
+ Returns:
1900
+ Shared PersonDatabase instance
1901
+ """
1902
+ path_key = str(db_path or DEFAULT_DB_PATH)
1903
+ if path_key not in _person_database_instances:
1904
+ logger.debug(f"Creating new PersonDatabase instance for {path_key}")
1905
+ _person_database_instances[path_key] = PersonDatabase(db_path=db_path, embedding_dim=embedding_dim)
1906
+ return _person_database_instances[path_key]
1907
+
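Usage sketch for the factory: calls with the same path return the same cached instance (the paths below are illustrative):

    db = get_person_database("/data/people.db")
    same = get_person_database("/data/people.db")
    assert db is same                              # shared per-path singleton
    other = get_person_database("/data/other.db")  # distinct path, distinct instance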
1908
+
1909
+ class PersonDatabase:
1910
+ """
1911
+ SQLite database with sqlite-vec for person vector search.
1912
+
1913
+ Uses hybrid text + vector search:
1914
+ 1. Text filtering with LIKE to reduce candidates
1915
+ 2. sqlite-vec for semantic similarity ranking
1916
+
1917
+ Stores people from sources like Wikidata with role/org context.
1918
+ """
1919
+
1920
+ def __init__(
1921
+ self,
1922
+ db_path: Optional[str | Path] = None,
1923
+ embedding_dim: int = 768, # Default for embeddinggemma-300m
1924
+ ):
1925
+ """
1926
+ Initialize the person database.
1927
+
1928
+ Args:
1929
+ db_path: Path to database file (creates if not exists)
1930
+ embedding_dim: Dimension of embeddings to store
1931
+ """
1932
+ self._db_path = Path(db_path) if db_path else DEFAULT_DB_PATH
1933
+ self._embedding_dim = embedding_dim
1934
+ self._conn: Optional[sqlite3.Connection] = None
1935
+
1936
+ def _ensure_dir(self) -> None:
1937
+ """Ensure database directory exists."""
1938
+ self._db_path.parent.mkdir(parents=True, exist_ok=True)
1939
+
1940
+ def _connect(self) -> sqlite3.Connection:
1941
+ """Get or create database connection using shared connection pool."""
1942
+ if self._conn is not None:
1943
+ return self._conn
1944
+
1945
+ self._conn = _get_shared_connection(self._db_path, self._embedding_dim)
1946
+
1947
+ # Create tables (idempotent)
1948
+ self._create_tables()
1949
+
1950
+ return self._conn
1951
+
1952
+ def _create_tables(self) -> None:
1953
+ """Create database tables including sqlite-vec virtual table."""
1954
+ conn = self._conn
1955
+ assert conn is not None
1956
+
1957
+ # Check if we need to migrate from old schema (unique on source+source_id only)
1958
+ self._migrate_people_schema_if_needed(conn)
1959
+
1960
+ # Main people records table
1961
+ # Unique constraint on source+source_id+role+org allows multiple records
1962
+ # for the same person with different role/org combinations
1963
+ conn.execute("""
1964
+ CREATE TABLE IF NOT EXISTS people (
1965
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1966
+ name TEXT NOT NULL,
1967
+ name_normalized TEXT NOT NULL,
1968
+ source TEXT NOT NULL DEFAULT 'wikidata',
1969
+ source_id TEXT NOT NULL,
1970
+ country TEXT NOT NULL DEFAULT '',
1971
+ person_type TEXT NOT NULL DEFAULT 'unknown',
1972
+ known_for_role TEXT NOT NULL DEFAULT '',
1973
+ known_for_org TEXT NOT NULL DEFAULT '',
1974
+ known_for_org_id INTEGER DEFAULT NULL,
1975
+ from_date TEXT NOT NULL DEFAULT '',
1976
+ to_date TEXT NOT NULL DEFAULT '',
1977
+ birth_date TEXT NOT NULL DEFAULT '',
1978
+ death_date TEXT NOT NULL DEFAULT '',
1979
+ record TEXT NOT NULL,
1980
+ UNIQUE(source, source_id, known_for_role, known_for_org),
1981
+ FOREIGN KEY (known_for_org_id) REFERENCES organizations(id)
1982
+ )
1983
+ """)
1984
+
1985
+ # Create indexes on main table
1986
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_people_name ON people(name)")
1987
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_people_name_normalized ON people(name_normalized)")
1988
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_people_source ON people(source)")
1989
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_people_source_id ON people(source, source_id, known_for_role, known_for_org)")
1990
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_people_known_for_org ON people(known_for_org)")
1991
+
1992
+ # Add from_date column if it doesn't exist (migration for existing DBs)
1993
+ try:
1994
+ conn.execute("ALTER TABLE people ADD COLUMN from_date TEXT NOT NULL DEFAULT ''")
1995
+ logger.info("Added from_date column to people table")
1996
+ except sqlite3.OperationalError:
1997
+ pass # Column already exists
1998
+
1999
+ # Add to_date column if it doesn't exist (migration for existing DBs)
2000
+ try:
2001
+ conn.execute("ALTER TABLE people ADD COLUMN to_date TEXT NOT NULL DEFAULT ''")
2002
+ logger.info("Added to_date column to people table")
2003
+ except sqlite3.OperationalError:
2004
+ pass # Column already exists
2005
+
2006
+ # Add known_for_org_id column if it doesn't exist (migration for existing DBs)
2007
+ # This is a foreign key to the organizations table (nullable)
2008
+ try:
2009
+ conn.execute("ALTER TABLE people ADD COLUMN known_for_org_id INTEGER DEFAULT NULL")
2010
+ logger.info("Added known_for_org_id column to people table")
2011
+ except sqlite3.OperationalError:
2012
+ pass # Column already exists
2013
+
2014
+ # Create index on known_for_org_id for joins (only if column exists)
2015
+ try:
2016
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_people_known_for_org_id ON people(known_for_org_id)")
2017
+ except sqlite3.OperationalError:
2018
+ pass # Column doesn't exist yet (will be added on next connection)
2019
+
2020
+ # Add birth_date column if it doesn't exist (migration for existing DBs)
2021
+ try:
2022
+ conn.execute("ALTER TABLE people ADD COLUMN birth_date TEXT NOT NULL DEFAULT ''")
2023
+ logger.info("Added birth_date column to people table")
2024
+ except sqlite3.OperationalError:
2025
+ pass # Column already exists
2026
+
2027
+ # Add death_date column if it doesn't exist (migration for existing DBs)
2028
+ try:
2029
+ conn.execute("ALTER TABLE people ADD COLUMN death_date TEXT NOT NULL DEFAULT ''")
2030
+ logger.info("Added death_date column to people table")
2031
+ except sqlite3.OperationalError:
2032
+ pass # Column already exists
2033
+
2034
+ # Add canon_id column if it doesn't exist (migration for canonicalization)
2035
+ try:
2036
+ conn.execute("ALTER TABLE people ADD COLUMN canon_id INTEGER DEFAULT NULL")
2037
+ logger.info("Added canon_id column to people table")
2038
+ except sqlite3.OperationalError:
2039
+ pass # Column already exists
2040
+
2041
+ # Add canon_size column if it doesn't exist (migration for canonicalization)
2042
+ try:
2043
+ conn.execute("ALTER TABLE people ADD COLUMN canon_size INTEGER DEFAULT 1")
2044
+ logger.info("Added canon_size column to people table")
2045
+ except sqlite3.OperationalError:
2046
+ pass # Column already exists
2047
+
2048
+ # Create index on canon_id for joins
2049
+ try:
2050
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_people_canon_id ON people(canon_id)")
2051
+ except sqlite3.OperationalError:
2052
+ pass # Column doesn't exist yet
2053
+
2054
+ # Create sqlite-vec virtual table for embeddings
2055
+ conn.execute(f"""
2056
+ CREATE VIRTUAL TABLE IF NOT EXISTS person_embeddings USING vec0(
2057
+ person_id INTEGER PRIMARY KEY,
2058
+ embedding float[{self._embedding_dim}]
2059
+ )
2060
+ """)
2061
+
2062
+ # Create QID labels lookup table for Wikidata QID -> label mappings
2063
+ conn.execute("""
2064
+ CREATE TABLE IF NOT EXISTS qid_labels (
2065
+ qid TEXT PRIMARY KEY,
2066
+ label TEXT NOT NULL
2067
+ )
2068
+ """)
2069
+
2070
+ conn.commit()
2071
+
2072
+ def _migrate_people_schema_if_needed(self, conn: sqlite3.Connection) -> None:
2073
+ """Migrate people table from old schema if needed."""
2074
+ # Check if people table exists
2075
+ cursor = conn.execute(
2076
+ "SELECT name FROM sqlite_master WHERE type='table' AND name='people'"
2077
+ )
2078
+ if not cursor.fetchone():
2079
+ return # Table doesn't exist, no migration needed
2080
+
2081
+ # Check the unique constraint - look at index info
2082
+ # Old schema: UNIQUE(source, source_id)
2083
+ # New schema: UNIQUE(source, source_id, known_for_role, known_for_org)
2084
+ cursor = conn.execute("PRAGMA index_list(people)")
2085
+ indexes = cursor.fetchall()
2086
+
2087
+ needs_migration = False
2088
+ for idx in indexes:
2089
+ idx_name = idx[1]
2090
+ if "sqlite_autoindex_people" in idx_name:
2091
+ # Check columns in this unique index
2092
+ cursor = conn.execute(f"PRAGMA index_info('{idx_name}')")
2093
+ cols = [row[2] for row in cursor.fetchall()]
2094
+ # Old schema has only 2 columns in unique constraint
2095
+ if cols == ["source", "source_id"]:
2096
+ needs_migration = True
2097
+ logger.info("Detected old people schema, migrating to new unique constraint...")
2098
+ break
2099
+
2100
+ if not needs_migration:
2101
+ return
2102
+
2103
+ # Migrate: create new table, copy data, drop old, rename new
2104
+ logger.info("Migrating people table to new schema with (source, source_id, role, org) unique constraint...")
2105
+
2106
+ conn.execute("""
2107
+ CREATE TABLE people_new (
2108
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
2109
+ name TEXT NOT NULL,
2110
+ name_normalized TEXT NOT NULL,
2111
+ source TEXT NOT NULL DEFAULT 'wikidata',
2112
+ source_id TEXT NOT NULL,
2113
+ country TEXT NOT NULL DEFAULT '',
2114
+ person_type TEXT NOT NULL DEFAULT 'unknown',
2115
+ known_for_role TEXT NOT NULL DEFAULT '',
2116
+ known_for_org TEXT NOT NULL DEFAULT '',
2117
+ known_for_org_id INTEGER DEFAULT NULL,
2118
+ from_date TEXT NOT NULL DEFAULT '',
2119
+ to_date TEXT NOT NULL DEFAULT '',
2120
+ record TEXT NOT NULL,
2121
+ UNIQUE(source, source_id, known_for_role, known_for_org),
2122
+ FOREIGN KEY (known_for_org_id) REFERENCES organizations(id)
2123
+ )
2124
+ """)
2125
+
2126
+ # Copy data. Row IDs are reassigned, so embeddings keyed on the old IDs become invalid (that table is dropped below)
2127
+ # Note: old table may not have from_date/to_date columns, so use defaults
2128
+ conn.execute("""
2129
+ INSERT INTO people_new (name, name_normalized, source, source_id, country,
2130
+ person_type, known_for_role, known_for_org, record)
2131
+ SELECT name, name_normalized, source, source_id, country,
2132
+ person_type, known_for_role, known_for_org, record
2133
+ FROM people
2134
+ """)
2135
+
2136
+ # Drop old table and embeddings (IDs changed, embeddings are invalid)
2137
+ conn.execute("DROP TABLE IF EXISTS person_embeddings")
2138
+ conn.execute("DROP TABLE people")
2139
+ conn.execute("ALTER TABLE people_new RENAME TO people")
2140
+
2141
+ # Drop old index if it exists
2142
+ conn.execute("DROP INDEX IF EXISTS idx_people_source_id")
2143
+
2144
+ conn.commit()
2145
+ logger.info("Migration complete. Note: person embeddings were cleared and need to be regenerated.")
2146
+
2147
+ def close(self) -> None:
2148
+ """Clear connection reference (shared connection remains open)."""
2149
+ self._conn = None
2150
+
2151
+ def insert(self, record: PersonRecord, embedding: np.ndarray) -> int:
2152
+ """
2153
+ Insert a person record with its embedding.
2154
+
2155
+ Args:
2156
+ record: Person record to insert
2157
+ embedding: Embedding vector for the person name
2158
+
2159
+ Returns:
2160
+ Row ID of inserted record
2161
+ """
2162
+ conn = self._connect()
2163
+
2164
+ # Serialize record
2165
+ record_json = json.dumps(record.record)
2166
+ name_normalized = _normalize_person_name(record.name)
2167
+
2168
+ cursor = conn.execute("""
2169
+ INSERT OR REPLACE INTO people
2170
+ (name, name_normalized, source, source_id, country, person_type,
2171
+ known_for_role, known_for_org, known_for_org_id, from_date, to_date,
2172
+ birth_date, death_date, record)
2173
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
2174
+ """, (
2175
+ record.name,
2176
+ name_normalized,
2177
+ record.source,
2178
+ record.source_id,
2179
+ record.country,
2180
+ record.person_type.value,
2181
+ record.known_for_role,
2182
+ record.known_for_org,
2183
+ record.known_for_org_id, # Can be None
2184
+ record.from_date or "",
2185
+ record.to_date or "",
2186
+ record.birth_date or "",
2187
+ record.death_date or "",
2188
+ record_json,
2189
+ ))
2190
+
2191
+ row_id = cursor.lastrowid
2192
+ assert row_id is not None
2193
+
2194
+ # Insert embedding into vec table (delete first since sqlite-vec doesn't support REPLACE)
2195
+ embedding_blob = embedding.astype(np.float32).tobytes()
2196
+ conn.execute("DELETE FROM person_embeddings WHERE person_id = ?", (row_id,))
2197
+ conn.execute("""
2198
+ INSERT INTO person_embeddings (person_id, embedding)
2199
+ VALUES (?, ?)
2200
+ """, (row_id, embedding_blob))
2201
+
2202
+ conn.commit()
2203
+ return row_id
2204
+
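An insertion sketch, assuming PersonRecord accepts the same keyword fields that _get_record_by_id reads back (the date fields presumably default to empty); all values are placeholders:

    import numpy as np

    record = PersonRecord(
        name="Jane Doe",
        source="wikidata",
        source_id="Q0",                 # placeholder QID
        country="United Kingdom",
        person_type=PersonType.UNKNOWN,
        known_for_role="director",
        known_for_org="Example Ltd",
        known_for_org_id=None,
        record={"qid": "Q0"},
    )
    embedding = np.zeros(768, dtype=np.float32)  # a real name embedding in practice
    row_id = db.insert(record, embedding)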
2205
+ def insert_batch(
2206
+ self,
2207
+ records: list[PersonRecord],
2208
+ embeddings: np.ndarray,
2209
+ batch_size: int = 1000,
2210
+ ) -> int:
2211
+ """
2212
+ Insert multiple person records with embeddings.
2213
+
2214
+ Args:
2215
+ records: List of person records
2216
+ embeddings: Matrix of embeddings (N x dim)
2217
+ batch_size: Commit batch size
2218
+
2219
+ Returns:
2220
+ Number of records inserted
2221
+ """
2222
+ conn = self._connect()
2223
+ count = 0
2224
+
2225
+ for record, embedding in zip(records, embeddings):
2226
+ record_json = json.dumps(record.record)
2227
+ name_normalized = _normalize_person_name(record.name)
2228
+
2229
+ cursor = conn.execute("""
2230
+ INSERT OR REPLACE INTO people
2231
+ (name, name_normalized, source, source_id, country, person_type,
2232
+ known_for_role, known_for_org, known_for_org_id, from_date, to_date,
2233
+ birth_date, death_date, record)
2234
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
2235
+ """, (
2236
+ record.name,
2237
+ name_normalized,
2238
+ record.source,
2239
+ record.source_id,
2240
+ record.country,
2241
+ record.person_type.value,
2242
+ record.known_for_role,
2243
+ record.known_for_org,
2244
+ record.known_for_org_id, # Can be None
2245
+ record.from_date or "",
2246
+ record.to_date or "",
2247
+ record.birth_date or "",
2248
+ record.death_date or "",
2249
+ record_json,
2250
+ ))
2251
+
2252
+ row_id = cursor.lastrowid
2253
+ assert row_id is not None
2254
+
2255
+ # Insert embedding (delete first since sqlite-vec doesn't support REPLACE)
2256
+ embedding_blob = embedding.astype(np.float32).tobytes()
2257
+ conn.execute("DELETE FROM person_embeddings WHERE person_id = ?", (row_id,))
2258
+ conn.execute("""
2259
+ INSERT INTO person_embeddings (person_id, embedding)
2260
+ VALUES (?, ?)
2261
+ """, (row_id, embedding_blob))
2262
+
2263
+ count += 1
2264
+
2265
+ if count % batch_size == 0:
2266
+ conn.commit()
2267
+ logger.info(f"Inserted {count} person records...")
2268
+
2269
+ conn.commit()
2270
+ return count
2271
+
2272
+ def update_dates(self, source: str, source_id: str, from_date: Optional[str], to_date: Optional[str]) -> bool:
2273
+ """
2274
+ Update the from_date and to_date for all records matching a source/source_id pair (a person may have several role/org rows).
2275
+
2276
+ Args:
2277
+ source: Data source (e.g., 'wikidata')
2278
+ source_id: Source identifier (e.g., QID)
2279
+ from_date: Start date in ISO format or None
2280
+ to_date: End date in ISO format or None
2281
+
2282
+ Returns:
2283
+ True if at least one record was updated, False if none matched
2284
+ """
2285
+ conn = self._connect()
2286
+
2287
+ cursor = conn.execute("""
2288
+ UPDATE people SET from_date = ?, to_date = ?
2289
+ WHERE source = ? AND source_id = ?
2290
+ """, (from_date or "", to_date or "", source, source_id))
2291
+
2292
+ conn.commit()
2293
+ return cursor.rowcount > 0
2294
+
2295
+ def update_role_org(
2296
+ self,
2297
+ source: str,
2298
+ source_id: str,
2299
+ known_for_role: str,
2300
+ known_for_org: str,
2301
+ known_for_org_id: Optional[int],
2302
+ new_embedding: np.ndarray,
2303
+ from_date: Optional[str] = None,
2304
+ to_date: Optional[str] = None,
2305
+ ) -> bool:
2306
+ """
2307
+ Update the role/org/dates for the first record matching source/source_id, and replace its stored embedding.
2308
+
2309
+ Args:
2310
+ source: Data source (e.g., 'wikidata')
2311
+ source_id: Source identifier (e.g., QID)
2312
+ known_for_role: Role/position title
2313
+ known_for_org: Organization name
2314
+ known_for_org_id: Organization internal ID (FK) or None
2315
+ new_embedding: New embedding vector based on updated data
2316
+ from_date: Start date in ISO format or None
2317
+ to_date: End date in ISO format or None
2318
+
2319
+ Returns:
2320
+ True if record was updated, False if not found
2321
+ """
2322
+ conn = self._connect()
2323
+
2324
+ # First get the person's internal ID
2325
+ row = conn.execute(
2326
+ "SELECT id FROM people WHERE source = ? AND source_id = ?",
2327
+ (source, source_id)
2328
+ ).fetchone()
2329
+
2330
+ if not row:
2331
+ return False
2332
+
2333
+ person_id = row[0]
2334
+
2335
+ # Update the person record (including dates)
2336
+ conn.execute("""
2337
+ UPDATE people SET
2338
+ known_for_role = ?, known_for_org = ?, known_for_org_id = ?,
2339
+ from_date = COALESCE(?, from_date, ''),
2340
+ to_date = COALESCE(?, to_date, '')
2341
+ WHERE id = ?
2342
+ """, (known_for_role, known_for_org, known_for_org_id, from_date, to_date, person_id))
2343
+
2344
+ # Update the embedding (delete then insert, since the sqlite-vec
+ # table doesn't support UPDATE/REPLACE)
+ embedding_bytes = new_embedding.astype(np.float32).tobytes()
+ conn.execute("DELETE FROM person_embeddings WHERE person_id = ?", (person_id,))
+ conn.execute("""
+ INSERT INTO person_embeddings (person_id, embedding)
+ VALUES (?, ?)
+ """, (person_id, embedding_bytes))
2350
+
2351
+ conn.commit()
2352
+ return True
2353
+
2354
+ def search(
2355
+ self,
2356
+ query_embedding: np.ndarray,
2357
+ top_k: int = 20,
2358
+ query_text: Optional[str] = None,
2359
+ max_text_candidates: int = 5000,
2360
+ ) -> list[tuple[PersonRecord, float]]:
2361
+ """
2362
+ Search for similar people using hybrid text + vector search.
2363
+
2364
+ Two-stage approach:
2365
+ 1. If query_text provided, use SQL LIKE to find candidates containing search terms
2366
+ 2. Use sqlite-vec for vector similarity ranking on filtered candidates
2367
+
2368
+ Args:
2369
+ query_embedding: Query embedding vector
2370
+ top_k: Number of results to return
2371
+ query_text: Optional query text for text-based pre-filtering
2372
+ max_text_candidates: Max candidates to keep after text filtering
2373
+
2374
+ Returns:
2375
+ List of (PersonRecord, similarity_score) tuples
2376
+ """
2377
+ start = time.time()
2378
+ self._connect()
2379
+
2380
+ # Normalize query embedding
2381
+ query_norm = np.linalg.norm(query_embedding)
2382
+ if query_norm == 0:
2383
+ return []
2384
+ query_normalized = query_embedding / query_norm
2385
+ query_blob = query_normalized.astype(np.float32).tobytes()
2386
+
2387
+ # Stage 1: Text-based pre-filtering (if query_text provided)
2388
+ candidate_ids: Optional[set[int]] = None
2389
+ if query_text:
2390
+ query_normalized_text = _normalize_person_name(query_text)
2391
+ if query_normalized_text:
2392
+ candidate_ids = self._text_filter_candidates(
2393
+ query_normalized_text,
2394
+ max_candidates=max_text_candidates,
2395
+ )
2396
+ logger.info(f"Text filter: {len(candidate_ids)} candidates for '{query_text}'")
2397
+
2398
+ # Stage 2: Vector search
2399
+ if candidate_ids is not None and len(candidate_ids) == 0:
2400
+ # No text matches, return empty
2401
+ return []
2402
+
2403
+ if candidate_ids is not None:
2404
+ # Search within text-filtered candidates
2405
+ results = self._vector_search_filtered(
2406
+ query_blob, candidate_ids, top_k
2407
+ )
2408
+ else:
2409
+ # Full vector search
2410
+ results = self._vector_search_full(query_blob, top_k)
2411
+
2412
+ elapsed = time.time() - start
2413
+ logger.debug(f"Person search took {elapsed:.3f}s (results={len(results)})")
2414
+ return results
2415
+
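A search sketch; `embed_text` is a hypothetical call into whatever embedding model the caller uses, and passing query_text turns on the stage-1 LIKE pre-filter:

    query = "Jane Doe"
    for person, score in db.search(
        query_embedding=embed_text(query),  # hypothetical; returns an np.ndarray
        top_k=10,
        query_text=query,
    ):
        print(f"{score:.3f}  {person.name}  ({person.known_for_role} @ {person.known_for_org})")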
2416
+ def _text_filter_candidates(
2417
+ self,
2418
+ query_normalized: str,
2419
+ max_candidates: int,
2420
+ ) -> set[int]:
2421
+ """
2422
+ Filter candidates using SQL LIKE for fast text matching.
2423
+
2424
+ Uses `name_normalized` column for consistent matching.
2425
+ """
2426
+ conn = self._conn
2427
+ assert conn is not None
2428
+
2429
+ # Extract search terms from the normalized query
2430
+ search_terms = _extract_search_terms(query_normalized)
2431
+ if not search_terms:
2432
+ return set()
2433
+
2434
+ logger.debug(f"Person text filter search terms: {search_terms}")
2435
+
2436
+ # Build OR clause for LIKE matching on any term
2437
+ like_clauses = []
2438
+ params: list = []
2439
+ for term in search_terms:
2440
+ like_clauses.append("name_normalized LIKE ?")
2441
+ params.append(f"%{term}%")
2442
+
2443
+ where_clause = " OR ".join(like_clauses)
2444
+
2445
+ query = f"""
2446
+ SELECT id FROM people
2447
+ WHERE {where_clause}
2448
+ LIMIT ?
2449
+ """
2450
+
2451
+ params.append(max_candidates)
2452
+
2453
+ cursor = conn.execute(query, params)
2454
+ return set(row["id"] for row in cursor)
2455
+
2456
+ def _vector_search_filtered(
2457
+ self,
2458
+ query_blob: bytes,
2459
+ candidate_ids: set[int],
2460
+ top_k: int,
2461
+ ) -> list[tuple[PersonRecord, float]]:
2462
+ """Vector search within a filtered set of candidates."""
2463
+ conn = self._conn
2464
+ assert conn is not None
2465
+
2466
+ if not candidate_ids:
2467
+ return []
2468
+
2469
+ # Build IN clause for candidate IDs (very large candidate sets can bump into SQLite's host-parameter limit)
2470
+ placeholders = ",".join("?" * len(candidate_ids))
2471
+
2472
+ query = f"""
2473
+ SELECT
2474
+ e.person_id,
2475
+ vec_distance_cosine(e.embedding, ?) as distance
2476
+ FROM person_embeddings e
2477
+ WHERE e.person_id IN ({placeholders})
2478
+ ORDER BY distance
2479
+ LIMIT ?
2480
+ """
2481
+
2482
+ cursor = conn.execute(query, [query_blob] + list(candidate_ids) + [top_k])
2483
+
2484
+ results = []
2485
+ for row in cursor:
2486
+ person_id = row["person_id"]
2487
+ distance = row["distance"]
2488
+ # Convert cosine distance to similarity (1 - distance)
2489
+ similarity = 1.0 - distance
2490
+
2491
+ # Fetch full record
2492
+ record = self._get_record_by_id(person_id)
2493
+ if record:
2494
+ results.append((record, similarity))
2495
+
2496
+ return results
2497
+
2498
+ def _vector_search_full(
2499
+ self,
2500
+ query_blob: bytes,
2501
+ top_k: int,
2502
+ ) -> list[tuple[PersonRecord, float]]:
2503
+ """Full vector search without text pre-filtering."""
2504
+ conn = self._conn
2505
+ assert conn is not None
2506
+
2507
+ query = """
2508
+ SELECT
2509
+ person_id,
2510
+ vec_distance_cosine(embedding, ?) as distance
2511
+ FROM person_embeddings
2512
+ ORDER BY distance
2513
+ LIMIT ?
2514
+ """
2515
+ cursor = conn.execute(query, (query_blob, top_k))
2516
+
2517
+ results = []
2518
+ for row in cursor:
2519
+ person_id = row["person_id"]
2520
+ distance = row["distance"]
2521
+ similarity = 1.0 - distance
2522
+
2523
+ record = self._get_record_by_id(person_id)
2524
+ if record:
2525
+ results.append((record, similarity))
2526
+
2527
+ return results
2528
+
2529
+ def _get_record_by_id(self, person_id: int) -> Optional[PersonRecord]:
2530
+ """Get a person record by ID."""
2531
+ conn = self._conn
2532
+ assert conn is not None
2533
+
2534
+ cursor = conn.execute("""
2535
+ SELECT name, source, source_id, country, person_type, known_for_role, known_for_org, known_for_org_id, birth_date, death_date, record
2536
+ FROM people WHERE id = ?
2537
+ """, (person_id,))
2538
+
2539
+ row = cursor.fetchone()
2540
+ if row:
2541
+ return PersonRecord(
2542
+ name=row["name"],
2543
+ source=row["source"],
2544
+ source_id=row["source_id"],
2545
+ country=row["country"] or "",
2546
+ person_type=PersonType(row["person_type"]) if row["person_type"] else PersonType.UNKNOWN,
2547
+ known_for_role=row["known_for_role"] or "",
2548
+ known_for_org=row["known_for_org"] or "",
2549
+ known_for_org_id=row["known_for_org_id"], # Can be None
2550
+ birth_date=row["birth_date"] or "",
2551
+ death_date=row["death_date"] or "",
2552
+ record=json.loads(row["record"]),
2553
+ )
2554
+ return None
2555
+
2556
+ def get_by_source_id(self, source: str, source_id: str) -> Optional[PersonRecord]:
2557
+ """Get a person record by source and source_id."""
2558
+ conn = self._connect()
2559
+
2560
+ cursor = conn.execute("""
2561
+ SELECT name, source, source_id, country, person_type, known_for_role, known_for_org, known_for_org_id, birth_date, death_date, record
2562
+ FROM people
2563
+ WHERE source = ? AND source_id = ?
2564
+ """, (source, source_id))
2565
+
2566
+ row = cursor.fetchone()
2567
+ if row:
2568
+ return PersonRecord(
2569
+ name=row["name"],
2570
+ source=row["source"],
2571
+ source_id=row["source_id"],
2572
+ country=row["country"] or "",
2573
+ person_type=PersonType(row["person_type"]) if row["person_type"] else PersonType.UNKNOWN,
2574
+ known_for_role=row["known_for_role"] or "",
2575
+ known_for_org=row["known_for_org"] or "",
2576
+ known_for_org_id=row["known_for_org_id"], # Can be None
2577
+ birth_date=row["birth_date"] or "",
2578
+ death_date=row["death_date"] or "",
2579
+ record=json.loads(row["record"]),
2580
+ )
2581
+ return None
2582
+
2583
+ def get_stats(self) -> dict:
2584
+ """Get database statistics for people table."""
2585
+ conn = self._connect()
2586
+
2587
+ # Total count
2588
+ cursor = conn.execute("SELECT COUNT(*) FROM people")
2589
+ total = cursor.fetchone()[0]
2590
+
2591
+ # Count by person_type
2592
+ cursor = conn.execute("SELECT person_type, COUNT(*) as cnt FROM people GROUP BY person_type")
2593
+ by_type = {row["person_type"]: row["cnt"] for row in cursor}
2594
+
2595
+ # Count by source
2596
+ cursor = conn.execute("SELECT source, COUNT(*) as cnt FROM people GROUP BY source")
2597
+ by_source = {row["source"]: row["cnt"] for row in cursor}
2598
+
2599
+ return {
2600
+ "total_records": total,
2601
+ "by_type": by_type,
2602
+ "by_source": by_source,
2603
+ }
2604
+
2605
+ def get_all_source_ids(self, source: Optional[str] = None) -> set[str]:
2606
+ """
2607
+ Get all source_ids from the people table.
2608
+
2609
+ Useful for resume operations to skip already-imported records.
2610
+
2611
+ Args:
2612
+ source: Optional source filter (e.g., "wikidata")
2613
+
2614
+ Returns:
2615
+ Set of source_id strings (e.g., Q codes for Wikidata)
2616
+ """
2617
+ conn = self._connect()
2618
+
2619
+ if source:
2620
+ cursor = conn.execute(
2621
+ "SELECT DISTINCT source_id FROM people WHERE source = ?",
2622
+ (source,)
2623
+ )
2624
+ else:
2625
+ cursor = conn.execute("SELECT DISTINCT source_id FROM people")
2626
+
2627
+ return {row[0] for row in cursor}
2628
+
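As the docstring notes, this enables resumable imports; a sketch where `incoming_records` is whatever the importer has parsed:

    seen = db.get_all_source_ids(source="wikidata")
    fresh = [r for r in incoming_records if r.source_id not in seen]  # skip already-imported rows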
2629
+ def iter_records(self, source: Optional[str] = None) -> Iterator[PersonRecord]:
2630
+ """Iterate over all person records, optionally filtered by source."""
2631
+ conn = self._connect()
2632
+
2633
+ if source:
2634
+ cursor = conn.execute("""
2635
+ SELECT name, source, source_id, country, person_type, known_for_role, known_for_org, known_for_org_id, birth_date, death_date, record
2636
+ FROM people
2637
+ WHERE source = ?
2638
+ """, (source,))
2639
+ else:
2640
+ cursor = conn.execute("""
2641
+ SELECT name, source, source_id, country, person_type, known_for_role, known_for_org, known_for_org_id, birth_date, death_date, record
2642
+ FROM people
2643
+ """)
2644
+
2645
+ for row in cursor:
2646
+ yield PersonRecord(
2647
+ name=row["name"],
2648
+ source=row["source"],
2649
+ source_id=row["source_id"],
2650
+ country=row["country"] or "",
2651
+ person_type=PersonType(row["person_type"]) if row["person_type"] else PersonType.UNKNOWN,
2652
+ known_for_role=row["known_for_role"] or "",
2653
+ known_for_org=row["known_for_org"] or "",
2654
+ known_for_org_id=row["known_for_org_id"], # Can be None
2655
+ birth_date=row["birth_date"] or "",
2656
+ death_date=row["death_date"] or "",
2657
+ record=json.loads(row["record"]),
2658
+ )
2659
+
2660
+ def resolve_qid_labels(
2661
+ self,
2662
+ label_map: dict[str, str],
2663
+ batch_size: int = 1000,
2664
+ ) -> tuple[int, int]:
2665
+ """
2666
+ Update records that have QIDs instead of labels.
2667
+
2668
+ This is called after dump import to resolve any QIDs that were
2669
+ stored because labels weren't available in the cache at import time.
2670
+
2671
+ If resolving would create a duplicate of an existing record with
2672
+ resolved labels, the QID version is deleted instead.
2673
+
2674
+ Args:
2675
+ label_map: Mapping of QID -> label for resolution
2676
+ batch_size: Commit batch size
2677
+
2678
+ Returns:
2679
+ Tuple of (updates, deletes)
2680
+ """
2681
+ conn = self._connect()
2682
+
2683
+ # Find all records with QIDs in any field (role or org - these are in unique constraint)
2684
+ # Country is not part of unique constraint so can be updated directly
2685
+ cursor = conn.execute("""
2686
+ SELECT id, source, source_id, country, known_for_role, known_for_org
2687
+ FROM people
2688
+ WHERE (country LIKE 'Q%' AND country GLOB 'Q[0-9]*')
2689
+ OR (known_for_role LIKE 'Q%' AND known_for_role GLOB 'Q[0-9]*')
2690
+ OR (known_for_org LIKE 'Q%' AND known_for_org GLOB 'Q[0-9]*')
2691
+ """)
2692
+ rows = cursor.fetchall()
2693
+
2694
+ updates = 0
2695
+ deletes = 0
2696
+
2697
+ for row in rows:
2698
+ person_id = row["id"]
2699
+ source = row["source"]
2700
+ source_id = row["source_id"]
2701
+ country = row["country"]
2702
+ role = row["known_for_role"]
2703
+ org = row["known_for_org"]
2704
+
2705
+ # Resolve QIDs to labels
2706
+ new_country = label_map.get(country, country) if country.startswith("Q") and country[1:].isdigit() else country
2707
+ new_role = label_map.get(role, role) if role.startswith("Q") and role[1:].isdigit() else role
2708
+ new_org = label_map.get(org, org) if org.startswith("Q") and org[1:].isdigit() else org
2709
+
2710
+ # Skip if nothing changed
2711
+ if new_country == country and new_role == role and new_org == org:
2712
+ continue
2713
+
2714
+ # Check if resolved values would duplicate an existing record
2715
+ # (unique constraint is on source, source_id, known_for_role, known_for_org)
2716
+ if new_role != role or new_org != org:
2717
+ cursor2 = conn.execute("""
2718
+ SELECT id FROM people
2719
+ WHERE source = ? AND source_id = ? AND known_for_role = ? AND known_for_org = ?
2720
+ AND id != ?
2721
+ """, (source, source_id, new_role, new_org, person_id))
2722
+ existing = cursor2.fetchone()
2723
+
2724
+ if existing:
2725
+ # Duplicate would exist - delete the QID version
2726
+ conn.execute("DELETE FROM people WHERE id = ?", (person_id,))
2727
+ conn.execute("DELETE FROM person_embeddings WHERE person_id = ?", (person_id,))
2728
+ deletes += 1
2729
+ logger.debug(f"Deleted duplicate QID record {person_id} (source_id={source_id})")
2730
+ continue
2731
+
2732
+ # No duplicate - update in place
2733
+ conn.execute("""
2734
+ UPDATE people SET country = ?, known_for_role = ?, known_for_org = ?
2735
+ WHERE id = ?
2736
+ """, (new_country, new_role, new_org, person_id))
2737
+ updates += 1
2738
+
2739
+ if (updates + deletes) % batch_size == 0:
2740
+ conn.commit()
2741
+ logger.info(f"Resolved QID labels: {updates} updates, {deletes} deletes...")
2742
+
2743
+ conn.commit()
2744
+ logger.info(f"Resolved QID labels: {updates} updates, {deletes} deletes")
2745
+ return updates, deletes
2746
+
2747
+ def get_unresolved_qids(self) -> set[str]:
2748
+ """
2749
+ Get all QIDs that still need resolution in the database.
2750
+
2751
+ Returns:
2752
+ Set of QIDs (starting with 'Q') found in country, role, or org fields
2753
+ """
2754
+ conn = self._connect()
2755
+ qids: set[str] = set()
2756
+
2757
+ # Get QIDs from country field
2758
+ cursor = conn.execute("""
2759
+ SELECT DISTINCT country FROM people
2760
+ WHERE country LIKE 'Q%' AND country GLOB 'Q[0-9]*'
2761
+ """)
2762
+ for row in cursor:
2763
+ qids.add(row["country"])
2764
+
2765
+ # Get QIDs from known_for_role field
2766
+ cursor = conn.execute("""
2767
+ SELECT DISTINCT known_for_role FROM people
2768
+ WHERE known_for_role LIKE 'Q%' AND known_for_role GLOB 'Q[0-9]*'
2769
+ """)
2770
+ for row in cursor:
2771
+ qids.add(row["known_for_role"])
2772
+
2773
+ # Get QIDs from known_for_org field
2774
+ cursor = conn.execute("""
2775
+ SELECT DISTINCT known_for_org FROM people
2776
+ WHERE known_for_org LIKE 'Q%' AND known_for_org GLOB 'Q[0-9]*'
2777
+ """)
2778
+ for row in cursor:
2779
+ qids.add(row["known_for_org"])
2780
+
2781
+ return qids
2782
+
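The intended flow (an assumption drawn from the docstrings) pairs this with resolve_qid_labels and insert_qid_labels; `lookup_labels` is a hypothetical resolver, e.g. a Wikidata label fetch:

    pending = db.get_unresolved_qids()            # e.g. {'Q30', ...}
    label_map = lookup_labels(pending)            # hypothetical: QID -> label
    updates, deletes = db.resolve_qid_labels(label_map)
    db.insert_qid_labels(label_map)               # cache labels for future imports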
2783
+ def insert_qid_labels(
2784
+ self,
2785
+ label_map: dict[str, str],
2786
+ batch_size: int = 1000,
2787
+ ) -> int:
2788
+ """
2789
+ Insert QID -> label mappings into the lookup table.
2790
+
2791
+ Args:
2792
+ label_map: Mapping of QID -> label
2793
+ batch_size: Commit batch size
2794
+
2795
+ Returns:
2796
+ Number of labels inserted/updated
2797
+ """
2798
+ conn = self._connect()
2799
+ count = 0
2800
+
2801
+ for qid, label in label_map.items():
2802
+ conn.execute(
2803
+ "INSERT OR REPLACE INTO qid_labels (qid, label) VALUES (?, ?)",
2804
+ (qid, label)
2805
+ )
2806
+ count += 1
2807
+
2808
+ if count % batch_size == 0:
2809
+ conn.commit()
2810
+ logger.debug(f"Inserted {count} QID labels...")
2811
+
2812
+ conn.commit()
2813
+ logger.info(f"Inserted {count} QID labels into lookup table")
2814
+ return count
2815
+
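Round-trip sketch for the lookup table (the mapping is illustrative):

    db.insert_qid_labels({"Q30": "United States"})
    assert db.get_qid_label("Q30") == "United States"
    assert db.get_qid_label("Q0") is None   # absent QID returns None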
2816
+ def get_qid_label(self, qid: str) -> Optional[str]:
2817
+ """
2818
+ Get the label for a QID from the lookup table.
2819
+
2820
+ Args:
2821
+ qid: Wikidata QID (e.g., 'Q30')
2822
+
2823
+ Returns:
2824
+ Label string or None if not found
2825
+ """
2826
+ conn = self._connect()
2827
+ cursor = conn.execute(
2828
+ "SELECT label FROM qid_labels WHERE qid = ?",
2829
+ (qid,)
2830
+ )
2831
+ row = cursor.fetchone()
2832
+ return row["label"] if row else None
2833
+
2834
+ def get_all_qid_labels(self) -> dict[str, str]:
2835
+ """
2836
+ Get all QID -> label mappings from the lookup table.
2837
+
2838
+ Returns:
2839
+ Dict mapping QID -> label
2840
+ """
2841
+ conn = self._connect()
2842
+ cursor = conn.execute("SELECT qid, label FROM qid_labels")
2843
+ return {row["qid"]: row["label"] for row in cursor}
2844
+
2845
+ def get_qid_labels_count(self) -> int:
2846
+ """Get the number of QID labels in the lookup table."""
2847
+ conn = self._connect()
2848
+ cursor = conn.execute("SELECT COUNT(*) FROM qid_labels")
2849
+ return cursor.fetchone()[0]
2850
+
2851
+ def canonicalize(self, batch_size: int = 10000) -> dict[str, int]:
2852
+ """
2853
+ Canonicalize person records by linking equivalent entries across sources.
2854
+
2855
+ Uses a multi-phase approach:
2856
+ 1. Match by normalized name + same organization (org canonical group)
2857
+ 2. Match by normalized name + overlapping date ranges
2858
+
2859
+ Source priority (lower = more authoritative):
2860
+ - wikidata: 1 (curated, has Q codes)
2861
+ - sec_edgar: 2 (US insider filings)
2862
+ - companies_house: 3 (UK officers)
2863
+
2864
+ Args:
2865
+ batch_size: Number of records to process before committing
2866
+
2867
+ Returns:
2868
+ Stats dict with counts for each matching type
2869
+ """
2870
+ conn = self._connect()
2871
+ stats = {
2872
+ "total_records": 0,
2873
+ "matched_by_org": 0,
2874
+ "matched_by_date": 0,
2875
+ "canonical_groups": 0,
2876
+ "records_in_groups": 0,
2877
+ }
2878
+
2879
+ logger.info("Phase 1: Building person index...")
2880
+
2881
+ # Load all people with their normalized names and org info
2882
+ cursor = conn.execute("""
2883
+ SELECT id, name, name_normalized, source, source_id,
2884
+ known_for_org, known_for_org_id, from_date, to_date
2885
+ FROM people
2886
+ """)
2887
+
2888
+ people: list[dict] = []
2889
+ for row in cursor:
2890
+ people.append({
2891
+ "id": row["id"],
2892
+ "name": row["name"],
2893
+ "name_normalized": row["name_normalized"],
2894
+ "source": row["source"],
2895
+ "source_id": row["source_id"],
2896
+ "known_for_org": row["known_for_org"],
2897
+ "known_for_org_id": row["known_for_org_id"],
2898
+ "from_date": row["from_date"],
2899
+ "to_date": row["to_date"],
2900
+ })
2901
+
2902
+ stats["total_records"] = len(people)
2903
+ logger.info(f"Loaded {len(people)} person records")
2904
+
2905
+ if len(people) == 0:
2906
+ return stats
2907
+
2908
+ # Initialize Union-Find
2909
+ person_ids = [p["id"] for p in people]
2910
+ uf = UnionFind(person_ids)
2911
+
2912
+ # Build indexes for efficient matching
2913
+ # Index by normalized name
2914
+ name_to_people: dict[str, list[dict]] = {}
2915
+ for p in people:
2916
+ name_norm = p["name_normalized"]
2917
+ name_to_people.setdefault(name_norm, []).append(p)
2918
+
2919
+ logger.info("Phase 2: Matching by normalized name + organization...")
2920
+
2921
+ # Match people with same normalized name and same organization
2922
+ for name_norm, same_name in name_to_people.items():
2923
+ if len(same_name) < 2:
2924
+ continue
2925
+
2926
+ # Group by organization (using known_for_org_id if available, else known_for_org)
2927
+ org_groups: dict[str, list[dict]] = {}
2928
+ for p in same_name:
2929
+ org_key = str(p["known_for_org_id"]) if p["known_for_org_id"] else p["known_for_org"]
2930
+ if org_key: # Only group if they have an org
2931
+ org_groups.setdefault(org_key, []).append(p)
2932
+
2933
+ # Union people with same name + same org
2934
+ for org_key, org_people in org_groups.items():
2935
+ if len(org_people) >= 2:
2936
+ first_id = org_people[0]["id"]
2937
+ for p in org_people[1:]:
2938
+ uf.union(first_id, p["id"])
2939
+ stats["matched_by_org"] += 1
2940
+
2941
+ logger.info(f"Phase 2 complete: {stats['matched_by_org']} matches by org")
2942
+
2943
+ logger.info("Phase 3: Matching by normalized name + overlapping dates...")
2944
+
2945
+ # Match people with same normalized name and overlapping date ranges
2946
+ for name_norm, same_name in name_to_people.items():
2947
+ if len(same_name) < 2:
2948
+ continue
2949
+
2950
+ # Skip if already all unified
2951
+ roots = set(uf.find(p["id"]) for p in same_name)
2952
+ if len(roots) == 1:
2953
+ continue
2954
+
2955
+ # Check for overlapping date ranges
2956
+ for i, p1 in enumerate(same_name):
2957
+ for p2 in same_name[i+1:]:
2958
+ # Skip if already in same group
2959
+ if uf.find(p1["id"]) == uf.find(p2["id"]):
2960
+ continue
2961
+
2962
+ # Check date overlap (if both have dates)
2963
+ if p1["from_date"] and p2["from_date"]:
2964
+ # Interval-overlap check; a missing to_date is treated as open-ended via a far-future sentinel
2965
+ p1_from = p1["from_date"]
2966
+ p1_to = p1["to_date"] or "9999-12-31"
2967
+ p2_from = p2["from_date"]
2968
+ p2_to = p2["to_date"] or "9999-12-31"
2969
+
2970
+ # Overlap if: p1_from <= p2_to AND p2_from <= p1_to
2971
+ if p1_from <= p2_to and p2_from <= p1_to:
2972
+ uf.union(p1["id"], p2["id"])
2973
+ stats["matched_by_date"] += 1
2974
+
2975
+ logger.info(f"Phase 3 complete: {stats['matched_by_date']} matches by date")
2976
+
2977
+ logger.info("Phase 4: Applying canonical updates...")
2978
+
2979
+ # Get all groups and select canonical record for each
2980
+ groups = uf.groups()
2981
+
2982
+ # Build id -> source mapping
2983
+ id_to_source = {p["id"]: p["source"] for p in people}
2984
+
2985
+ batch_updates: list[tuple[int, int, int]] = [] # (person_id, canon_id, canon_size)
2986
+
2987
+ for _root, group_ids in groups.items():
2988
+ group_size = len(group_ids)
2989
+
2990
+ if group_size == 1:
2991
+ # Single record is its own canonical
2992
+ person_id = group_ids[0]
2993
+ batch_updates.append((person_id, person_id, 1))
2994
+ else:
2995
+ # Multiple records - pick highest priority source as canonical
2996
+ # Sort by source priority, then by id (for stability)
2997
+ sorted_ids = sorted(
2998
+ group_ids,
2999
+ key=lambda pid: (PERSON_SOURCE_PRIORITY.get(id_to_source[pid], 99), pid)
3000
+ )
3001
+ canon_id = sorted_ids[0]
3002
+ stats["canonical_groups"] += 1
3003
+ stats["records_in_groups"] += group_size
3004
+
3005
+ for person_id in group_ids:
3006
+ batch_updates.append((person_id, canon_id, group_size if person_id == canon_id else 1))
3007
+
3008
+ # Commit in batches
3009
+ if len(batch_updates) >= batch_size:
3010
+ self._apply_person_canon_updates(batch_updates)
3011
+ conn.commit()
3012
+ logger.info(f"Applied {len(batch_updates)} canon updates...")
3013
+ batch_updates = []
3014
+
3015
+ # Final batch
3016
+ if batch_updates:
3017
+ self._apply_person_canon_updates(batch_updates)
3018
+ conn.commit()
3019
+
3020
+ logger.info(f"Canonicalization complete: {stats['canonical_groups']} groups, "
3021
+ f"{stats['records_in_groups']} records in multi-record groups")
3022
+
3023
+ return stats
3024
+
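canonicalize leans on a UnionFind helper defined elsewhere in this file; a minimal sketch of the interface the code above assumes (find, union, and a groups() mapping of root -> member ids):

    class UnionFind:
        """Disjoint-set forest (interface sketch, with path halving)."""

        def __init__(self, ids):
            self._parent = {i: i for i in ids}

        def find(self, x):
            while self._parent[x] != x:
                self._parent[x] = self._parent[self._parent[x]]  # path halving
                x = self._parent[x]
            return x

        def union(self, a, b):
            ra, rb = self.find(a), self.find(b)
            if ra != rb:
                self._parent[rb] = ra

        def groups(self):
            out = {}
            for i in self._parent:
                out.setdefault(self.find(i), []).append(i)
            return out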
3025
+ def _apply_person_canon_updates(self, updates: list[tuple[int, int, int]]) -> None:
3026
+ """Apply batch of canon updates: (person_id, canon_id, canon_size)."""
3027
+ conn = self._conn
3028
+ assert conn is not None
3029
+
3030
+ for person_id, canon_id, canon_size in updates:
3031
+ conn.execute(
3032
+ "UPDATE people SET canon_id = ?, canon_size = ? WHERE id = ?",
3033
+ (canon_id, canon_size, person_id)
3034
+ )