tablassert 7.0.1__tar.gz → 7.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. tablassert-7.1.0/.pre-commit-config.yaml +15 -0
  2. {tablassert-7.0.1 → tablassert-7.1.0}/CHANGELOG.md +21 -0
  3. {tablassert-7.0.1 → tablassert-7.1.0}/PKG-INFO +3 -3
  4. {tablassert-7.0.1 → tablassert-7.1.0}/docs/api/fullmap.md +29 -15
  5. {tablassert-7.0.1 → tablassert-7.1.0}/docs/api/qc.md +7 -9
  6. {tablassert-7.0.1 → tablassert-7.1.0}/docs/cli.md +1 -1
  7. {tablassert-7.0.1 → tablassert-7.1.0}/pyproject.toml +4 -5
  8. tablassert-7.1.0/src/tablassert/downloader.py +44 -0
  9. tablassert-7.1.0/src/tablassert/enums.py +523 -0
  10. tablassert-7.1.0/src/tablassert/fullmap.py +190 -0
  11. tablassert-7.1.0/src/tablassert/ingests.py +50 -0
  12. tablassert-7.1.0/src/tablassert/lib.py +597 -0
  13. tablassert-7.1.0/src/tablassert/log.py +18 -0
  14. tablassert-7.1.0/src/tablassert/models.py +134 -0
  15. tablassert-7.1.0/src/tablassert/qc.py +140 -0
  16. tablassert-7.1.0/src/tablassert/utils.py +51 -0
  17. {tablassert-7.0.1 → tablassert-7.1.0}/uv.lock +15 -1
  18. tablassert-7.0.1/.planning/PROJECT.md +0 -47
  19. tablassert-7.0.1/.planning/REQUIREMENTS.md +0 -73
  20. tablassert-7.0.1/.planning/ROADMAP.md +0 -66
  21. tablassert-7.0.1/.planning/STATE.md +0 -79
  22. tablassert-7.0.1/.planning/config.json +0 -15
  23. tablassert-7.0.1/.planning/quick/1-please-add-a-github-action-that-runs-uv-/1-PLAN.md +0 -90
  24. tablassert-7.0.1/.planning/quick/1-please-add-a-github-action-that-runs-uv-/1-SUMMARY.md +0 -80
  25. tablassert-7.0.1/.planning/research/ARCHITECTURE.md +0 -220
  26. tablassert-7.0.1/.planning/research/FEATURES.md +0 -134
  27. tablassert-7.0.1/.planning/research/PITFALLS.md +0 -219
  28. tablassert-7.0.1/.planning/research/STACK.md +0 -140
  29. tablassert-7.0.1/.planning/research/SUMMARY.md +0 -146
  30. tablassert-7.0.1/.pre-commit-config.yaml +0 -15
  31. tablassert-7.0.1/src/tablassert/downloader.py +0 -35
  32. tablassert-7.0.1/src/tablassert/enums.py +0 -521
  33. tablassert-7.0.1/src/tablassert/fullmap.py +0 -167
  34. tablassert-7.0.1/src/tablassert/ingests.py +0 -43
  35. tablassert-7.0.1/src/tablassert/lib.py +0 -602
  36. tablassert-7.0.1/src/tablassert/log.py +0 -15
  37. tablassert-7.0.1/src/tablassert/models.py +0 -131
  38. tablassert-7.0.1/src/tablassert/qc.py +0 -124
  39. tablassert-7.0.1/src/tablassert/utils.py +0 -43
  40. {tablassert-7.0.1 → tablassert-7.1.0}/.github/workflows/docs.yml +0 -0
  41. {tablassert-7.0.1 → tablassert-7.1.0}/.github/workflows/pipy.yml +0 -0
  42. {tablassert-7.0.1 → tablassert-7.1.0}/.gitignore +0 -0
  43. {tablassert-7.0.1 → tablassert-7.1.0}/.python-version +0 -0
  44. {tablassert-7.0.1 → tablassert-7.1.0}/.vscode/settings.json +0 -0
  45. {tablassert-7.0.1 → tablassert-7.1.0}/LICENSE +0 -0
  46. {tablassert-7.0.1 → tablassert-7.1.0}/README.md +0 -0
  47. {tablassert-7.0.1 → tablassert-7.1.0}/docs/api/utils.md +0 -0
  48. {tablassert-7.0.1 → tablassert-7.1.0}/docs/configuration/advanced-example.md +0 -0
  49. {tablassert-7.0.1 → tablassert-7.1.0}/docs/configuration/graph.md +0 -0
  50. {tablassert-7.0.1 → tablassert-7.1.0}/docs/configuration/table.md +0 -0
  51. {tablassert-7.0.1 → tablassert-7.1.0}/docs/examples/tutorial-data.csv +0 -0
  52. {tablassert-7.0.1 → tablassert-7.1.0}/docs/examples/tutorial-graph.yaml +0 -0
  53. {tablassert-7.0.1 → tablassert-7.1.0}/docs/examples/tutorial-table.yaml +0 -0
  54. {tablassert-7.0.1 → tablassert-7.1.0}/docs/index.md +0 -0
  55. {tablassert-7.0.1 → tablassert-7.1.0}/docs/installation.md +0 -0
  56. {tablassert-7.0.1 → tablassert-7.1.0}/docs/tutorial.md +0 -0
  57. {tablassert-7.0.1 → tablassert-7.1.0}/mkdocs.yml +0 -0
  58. {tablassert-7.0.1 → tablassert-7.1.0}/src/tablassert/__init__.py +0 -0
@@ -0,0 +1,15 @@
1
+ repos:
2
+ - repo: https://github.com/astral-sh/ruff-pre-commit
3
+ rev: v0.9.0
4
+ hooks:
5
+ - id: ruff
6
+ args: [--fix]
7
+ - id: ruff-format
8
+ - repo: local
9
+ hooks:
10
+ - id: pyright
11
+ name: pyright
12
+ entry: uv run pyright
13
+ language: system
14
+ types: [python]
15
+ pass_filenames: false
@@ -2,6 +2,27 @@
2
2
 
3
3
  All notable changes to this project are documented in this file.
4
4
 
5
+ ## Unreleased
6
+
7
+ ### Changes
8
+ - Updated `fullmap` ranking to prioritize case-insensitive exact matches between normalized terms and preferred names.
9
+ - Updated `fullmap` term de-duplication to keep first occurrences, improving deterministic output ordering.
10
+
11
+ ## 7.0.2 - 2026-03-23
12
+
13
+ ### Changes
14
+ - Updated package metadata for the 7.0.2 release.
15
+ - Added optional `log` and `column_context` controls to `fullmap.version4()` for more configurable entity-resolution behavior.
16
+
17
+ ### Bug Fixes
18
+ - Reworked entity-resolution querying to register terms directly in DuckDB instead of writing temporary parquet files, removing tempfile lifecycle issues in `fullmap` query execution.
19
+ - Isolated unmatched-entity logging into a dedicated helper and gated it behind an explicit logging flag.
20
+
21
+ ### Documentation
22
+ - Updated API reference docs to match the current `version4()` function signature and behavior.
23
+ - Corrected QC documentation to reflect the implemented fuzzy/BERT validation pipeline.
24
+ - Fixed documentation path typos for cache/store artifact directories.
25
+
5
26
  ## 7.0.1 - 2026-03-17
6
27
 
7
28
  ### Documentation
@@ -1,12 +1,11 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tablassert
3
- Version: 7.0.1
3
+ Version: 7.1.0
4
4
  Summary: Tablassert is a highly performant declarative knowledge graph backend designed to extract knowledge assertions from tabular data while exporting NCATS Translator-compliant Knowledge Graph Exchange (KGX) NDJSON.
5
5
  Project-URL: Homepage, https://github.com/SkyeAv/Tablassert
6
6
  Project-URL: Source, https://github.com/SkyeAv/Tablassert
7
7
  Project-URL: Documentation, https://skyeav.github.io/Tablassert/
8
- Author: Jared C. Roach
9
- Author-email: Skye Lane Goetz <sgoetz@isbscience.org>, Gwennen Glusman <gglusman@isbscience.org>
8
+ Author-email: Skye Lane Goetz <sgoetz@isbscience.org>
10
9
  License-Expression: Apache-2.0
11
10
  License-File: LICENSE
12
11
  Keywords: declarative pipeline,knowledge graph,natural language processing,ncats translator,ner,tablassert,table mining,yaml configuration
@@ -15,6 +14,7 @@ Requires-Python: >=3.13
15
14
  Requires-Dist: diskcache>=5.6.3
16
15
  Requires-Dist: duckdb>=1.5.0
17
16
  Requires-Dist: fastexcel>=0.19.0
17
+ Requires-Dist: lazy-loader>=0.5
18
18
  Requires-Dist: loguru>=0.7.3
19
19
  Requires-Dist: mkdocs>=1.6.1
20
20
  Requires-Dist: onnxruntime>=1.24.3
@@ -13,11 +13,13 @@ def version4(
13
13
  lf: pl.LazyFrame,
14
14
  col: str,
15
15
  conn: object,
16
- taxon: Optional[str],
17
- prioritize: Optional[list[Categories]],
18
- avoid: Optional[list[Categories]],
19
- section_hash: str,
20
- config_file: str,
16
+ taxon: Optional[str] = None,
17
+ prioritize: Optional[list[Categories]] = None,
18
+ avoid: Optional[list[Categories]] = None,
19
+ log: bool = True,
20
+ section_hash: Optional[str] = None,
21
+ config_file: Optional[str] = None,
22
+ column_context: bool = True,
21
23
  tag: str = " one"
22
24
  ) -> pl.LazyFrame
23
25
  ```
@@ -61,6 +63,18 @@ Optional list of Biolink categories to exclude from results.
61
63
 
62
64
  Example: `[Categories.Gene]` prevents gene mappings.
63
65
 
66
+ **`log: bool` (default: `True`)**
67
+
68
+ Controls unmatched-value logging. When enabled, unresolved terms are logged with section/config/column context.
69
+
70
+ **`section_hash: Optional[str]` / `config_file: Optional[str]`**
71
+
72
+ Optional context fields used for operational logging when unmatched values are encountered.
73
+
74
+ **`column_context: bool` (default: `True`)**
75
+
76
+ Controls category-frequency tie-breaking when multiple matches exist for a term. When `True`, the query result adds a category frequency score and prefers more frequent category hits.
77
+
64
78
  **`tag: str` (default: `" one"`)**
65
79
 
66
80
  Suffix for NLP processing level column.
@@ -71,10 +85,6 @@ The function looks for both:
71
85
 
72
86
  Default `" one"` means it uses level-one text processing (lowercase, stripped).
73
87
 
74
- **`section_hash: str` / `config_file: str`**
75
-
76
- Context fields used for operational logging when unmatched values are encountered.
77
-
78
88
  ### Return Value
79
89
 
80
90
  Returns a Polars LazyFrame with these columns added:
@@ -91,25 +101,27 @@ Returns a Polars LazyFrame with these columns added:
91
101
 
92
102
  ### DuckDB Query
93
103
 
94
- The function executes a complex SQL query that:
104
+ The function executes a SQL query that:
105
+
106
+ 1. **Builds an in-memory term table** by collecting terms from both NLP levels, deduplicating by keeping first occurrences for deterministic ordering, and registering them in DuckDB as `PARQUET` via `conn.register("PARQUET", df.to_arrow())`.
95
107
 
96
- 1. **Ranks matches** by:
108
+ 2. **Ranks matches** by:
97
109
  - Category priority (if `prioritize` specified)
110
+ - Preferred-name exactness (case-insensitive exact match of normalized term to preferred name)
98
111
  - NLP level (exact case match preferred over normalized)
99
- - Source confidence
112
+ - Category frequency (if `column_context=True`)
100
113
 
101
- 2. **Filters by:**
114
+ 3. **Filters by:**
102
115
  - Taxon ID (if specified)
103
116
  - Category avoidance (if specified)
104
117
 
105
- 3. **Deduplicates** to one CURIE per row per input string
118
+ 4. **Deduplicates** to one CURIE per input string
106
119
 
107
120
  ### Example Usage
108
121
 
109
122
  ```python
110
123
  from tablassert.fullmap import version4
111
124
  from tablassert.enums import Categories
112
- from pathlib import Path
113
125
  import duckdb
114
126
  import polars as pl
115
127
 
@@ -127,8 +139,10 @@ result = version4(
127
139
  taxon="9606", # Human only
128
140
  prioritize=[Categories.Gene],
129
141
  avoid=[Categories.Protein],
142
+ log=True,
130
143
  section_hash="tutorial-section",
131
144
  config_file="tutorial-table.yaml",
145
+ column_context=True,
132
146
  tag=" one"
133
147
  )
134
148
 
@@ -72,28 +72,26 @@ original == preferred_name
72
72
 
73
73
  **Performance:** O(1) string comparison
74
74
 
75
+ Before fuzzy matching, the function also applies rule-based pass-through checks for known safe patterns (for example CHEBI/PR/UniProtKB CURIE families and selected exception prefixes).
76
+
75
77
  #### Stage 2: Fuzzy Matching
76
78
 
77
79
  **Medium confidence using RapidFuzz.**
78
80
 
79
- Four fuzzy matching algorithms:
81
+ Two fuzzy matching algorithms:
80
82
  1. **Ratio:** Overall string similarity
81
- 2. **Partial ratio:** Substring matching
82
- 3. **Token sort ratio:** Order-independent word matching
83
- 4. **Partial token sort ratio:** Combined approach
83
+ 2. **Partial token sort ratio:** Combined token/subsequence matching
84
84
 
85
85
  **Threshold:** Default 20% similarity (configurable)
86
86
 
87
87
  ```python
88
88
  fuzz.ratio(original, preferred) >= 20
89
- or fuzz.ratio(original, curie) >= 20
90
89
  or fuzz.partial_token_sort_ratio(original, preferred) >= 20
91
- or fuzz.partial_token_sort_ratio(original, curie) >= 20
92
90
  ```
93
91
 
94
92
  **Example passes:**
95
93
  - Original: `"breast ca"` → Preferred: `"breast cancer"` ✓
96
- - Original: `"T53"` → CURIE: `"HGNC:11998"` (TP53) ✗ (goes to Stage 3)
94
+ - Original: `"T53"` → Preferred: `"tumor protein p53"` ✗ (goes to Stage 3)
97
95
 
98
96
  **Performance:** O(n) string operations, cached via `@DISKCACHE.memoize()`
99
97
 
@@ -128,7 +126,7 @@ return similarity >= 0.2
128
126
  - ONNX session caching
129
127
  - Disk cache for embeddings (~100MB LRU)
130
128
 
131
- Loaded once at module import, reused for all calls.
129
+ Lazy-loaded on first `BERT_audit()` call, then reused for subsequent calls.
132
130
 
133
131
  ### Disk Caching
134
132
 
@@ -142,7 +140,7 @@ def fuzz_audit(...): ...
142
140
  def BERT_audit(...): ...
143
141
  ```
144
142
 
145
- **Cache location:** `cachessert/` directory
143
+ **Cache location:** `./.cachassert` directory
146
144
 
147
145
  **Cache strategy:** LRU eviction when size exceeds limit
148
146
 
@@ -40,7 +40,7 @@ Final output files are written to the current working directory as:
40
40
  - `{name}_{version}.nodes.ndjson` - Node file (entities)
41
41
  - `{name}_{version}.edges.ndjson` - Edge file (relationships)
42
42
 
43
- Intermediate parquet artifacts are written to `storessert/` during section processing.
43
+ Intermediate parquet artifacts are written to `.storassert/` during section processing.
44
44
 
45
45
  See [Graph Configuration](configuration/graph.md) for details on the YAML schema.
46
46
 
@@ -1,11 +1,9 @@
1
1
  [project]
2
2
  name = "tablassert"
3
- version = "7.0.1"
3
+ version = "7.1.0"
4
4
  description = "Tablassert is a highly performant declarative knowledge graph backend designed to extract knowledge assertions from tabular data while exporting NCATS Translator-compliant Knowledge Graph Exchange (KGX) NDJSON."
5
5
  authors = [
6
- { name = "Skye Lane Goetz", email = "sgoetz@isbscience.org" },
7
- { name = "Gwennen Glusman", email = "gglusman@isbscience.org" },
8
- { name = "Jared C. Roach" },
6
+ { name = "Skye Lane Goetz", email = "sgoetz@isbscience.org" }
9
7
  ]
10
8
  keywords = [
11
9
  "knowledge graph",
@@ -27,6 +25,7 @@ dependencies = [
27
25
  "diskcache>=5.6.3",
28
26
  "duckdb>=1.5.0",
29
27
  "fastexcel>=0.19.0",
28
+ "lazy-loader>=0.5",
30
29
  "loguru>=0.7.3",
31
30
  "mkdocs>=1.6.1",
32
31
  "onnxruntime>=1.24.3",
@@ -77,7 +76,7 @@ dev = [
77
76
 
78
77
  [tool.ruff]
79
78
  line-length = 120
80
- indent-width = 2
79
+ indent-width = 4
81
80
  target-version = "py313"
82
81
 
83
82
  [tool.ruff.format]
@@ -0,0 +1,44 @@
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+ from time import sleep
5
+ from typing import TYPE_CHECKING, Optional
6
+
7
+ import lazy_loader as Lazy
8
+ from playwright.sync_api import sync_playwright
9
+
10
+ if TYPE_CHECKING:
11
+ import pyexcel
12
+ else:
13
+ pyexcel = Lazy.load("pyexcel")
14
+
15
+
16
+ def modernize_xls(p: Path) -> Path:
17
+ xlsx: Path = p.with_suffix(".xlsx")
18
+ pyexcel.save_book_as(file_name=str(p), dest_file_name=str(xlsx))
19
+ return xlsx
20
+
21
+
22
+ def from_url(website: str, p: Path, timeout: int = 60_000, retries: int = 3) -> Path:
23
+ p.parent.mkdir(parents=True, exist_ok=True)
24
+ if p.is_file():
25
+ return p
26
+
27
+ last: Optional[Exception] = None
28
+ for attempt in range(retries):
29
+ try:
30
+ with sync_playwright() as pw:
31
+ browser = pw.chromium.launch(headless=True)
32
+ page = browser.new_page()
33
+ page.goto(website, wait_until="networkidle", timeout=timeout)
34
+ with page.expect_download(timeout=timeout) as info:
35
+ download = info.value
36
+ download.save_as(p)
37
+ browser.close()
38
+ return p
39
+ except Exception as e:
40
+ last = e
41
+ if attempt < retries - 1:
42
+ sleep(2**attempt)
43
+
44
+ raise RuntimeError(f"01 | Download Failed After {retries} Attempts: {last}")