biblicus 0.12.0-py3-none-any.whl → 0.13.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
biblicus/__init__.py CHANGED
@@ -27,4 +27,4 @@ __all__ = [
     "RetrievalRun",
 ]
 
-__version__ = "0.12.0"
+__version__ = "0.13.0"
biblicus/cli.py CHANGED
@@ -28,6 +28,11 @@ from .errors import ExtractionRunFatalError
 from .evaluation import evaluate_run, load_dataset
 from .evidence_processing import apply_evidence_filter, apply_evidence_reranker
 from .extraction import build_extraction_run
+from .extraction_evaluation import (
+    evaluate_extraction_run,
+    load_extraction_dataset,
+    write_extraction_evaluation_result,
+)
 from .models import QueryBudget, RetrievalResult, parse_extraction_run_reference
 from .uris import corpus_ref_to_path
 
@@ -506,6 +511,54 @@ def cmd_extract_delete(arguments: argparse.Namespace) -> int:
     return 0
 
 
+def cmd_extract_evaluate(arguments: argparse.Namespace) -> int:
+    """
+    Evaluate an extraction run against a dataset.
+
+    :param arguments: Parsed command-line interface arguments.
+    :type arguments: argparse.Namespace
+    :return: Exit code.
+    :rtype: int
+    """
+    corpus = (
+        Corpus.open(arguments.corpus)
+        if getattr(arguments, "corpus", None)
+        else Corpus.find(Path.cwd())
+    )
+    if arguments.run:
+        run_ref = parse_extraction_run_reference(arguments.run)
+    else:
+        run_ref = corpus.latest_extraction_run_reference()
+        if run_ref is None:
+            raise ValueError("Extraction evaluation requires an extraction run")
+        print(
+            "Warning: using latest extraction run; pass --run for reproducibility.",
+            file=sys.stderr,
+        )
+
+    dataset_path = Path(arguments.dataset)
+    if not dataset_path.is_file():
+        raise FileNotFoundError(f"Dataset file not found: {dataset_path}")
+    try:
+        dataset = load_extraction_dataset(dataset_path)
+    except ValidationError as exc:
+        raise ValueError(f"Invalid extraction dataset: {exc}") from exc
+
+    run = corpus.load_extraction_run_manifest(
+        extractor_id=run_ref.extractor_id,
+        run_id=run_ref.run_id,
+    )
+    result = evaluate_extraction_run(
+        corpus=corpus,
+        run=run,
+        extractor_id=run_ref.extractor_id,
+        dataset=dataset,
+    )
+    write_extraction_evaluation_result(corpus=corpus, run_id=run.run_id, result=result)
+    print(result.model_dump_json(indent=2))
+    return 0
+
+
 def cmd_query(arguments: argparse.Namespace) -> int:
     """
     Execute a retrieval query.
@@ -901,6 +954,22 @@ def build_parser() -> argparse.ArgumentParser:
     )
     p_extract_delete.set_defaults(func=cmd_extract_delete)
 
+    p_extract_evaluate = extract_sub.add_parser(
+        "evaluate", help="Evaluate an extraction run against a dataset."
+    )
+    _add_common_corpus_arg(p_extract_evaluate)
+    p_extract_evaluate.add_argument(
+        "--run",
+        default=None,
+        help="Extraction run reference in the form extractor_id:run_id (defaults to latest run).",
+    )
+    p_extract_evaluate.add_argument(
+        "--dataset",
+        required=True,
+        help="Path to the extraction evaluation dataset JSON file.",
+    )
+    p_extract_evaluate.set_defaults(func=cmd_extract_evaluate)
+
     p_query = sub.add_parser("query", help="Run a retrieval query.")
     _add_common_corpus_arg(p_query)
     p_query.add_argument("--run", default=None, help="Run identifier (defaults to latest run).")
biblicus/constants.py CHANGED
@@ -4,6 +4,7 @@ Shared constants for Biblicus.
 
 SCHEMA_VERSION = 2
 DATASET_SCHEMA_VERSION = 1
+EXTRACTION_DATASET_SCHEMA_VERSION = 1
 ANALYSIS_SCHEMA_VERSION = 1
 CORPUS_DIR_NAME = ".biblicus"
 DEFAULT_RAW_DIR = "raw"
biblicus/extraction_evaluation.py ADDED
@@ -0,0 +1,312 @@
+"""
+Extraction evaluation utilities for Biblicus.
+"""
+
+from __future__ import annotations
+
+import json
+from difflib import SequenceMatcher
+from pathlib import Path
+from typing import Dict, List, Optional
+
+from pydantic import BaseModel, ConfigDict, Field, model_validator
+
+from .constants import EXTRACTION_DATASET_SCHEMA_VERSION
+from .corpus import Corpus
+from .extraction import ExtractionRunManifest
+from .models import CatalogItem
+from .time import utc_now_iso
+
+
+class ExtractionEvaluationItem(BaseModel):
+    """
+    Dataset item for extraction evaluation.
+
+    :ivar item_id: Optional item identifier.
+    :vartype item_id: str or None
+    :ivar source_uri: Optional source uniform resource identifier.
+    :vartype source_uri: str or None
+    :ivar expected_text: Expected extracted text.
+    :vartype expected_text: str
+    :ivar kind: Label kind (gold or synthetic).
+    :vartype kind: str
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    item_id: Optional[str] = None
+    source_uri: Optional[str] = None
+    expected_text: str
+    kind: str = Field(default="gold")
+
+    @model_validator(mode="after")
+    def _require_locator(self) -> "ExtractionEvaluationItem":
+        if not self.item_id and not self.source_uri:
+            raise ValueError("Evaluation items must include item_id or source_uri")
+        return self
+
+
+class ExtractionEvaluationDataset(BaseModel):
+    """
+    Dataset for extraction evaluation.
+
+    :ivar schema_version: Dataset schema version.
+    :vartype schema_version: int
+    :ivar name: Dataset name.
+    :vartype name: str
+    :ivar description: Optional description.
+    :vartype description: str or None
+    :ivar items: Labeled evaluation items.
+    :vartype items: list[ExtractionEvaluationItem]
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    schema_version: int = Field(ge=1)
+    name: str
+    description: Optional[str] = None
+    items: List[ExtractionEvaluationItem] = Field(default_factory=list)
+
+    @model_validator(mode="after")
+    def _enforce_schema_version(self) -> "ExtractionEvaluationDataset":
+        if self.schema_version != EXTRACTION_DATASET_SCHEMA_VERSION:
+            raise ValueError(
+                f"Unsupported extraction dataset schema version: {self.schema_version}"
+            )
+        return self
+
+
+class ExtractionEvaluationItemReport(BaseModel):
+    """
+    Per-item report for extraction evaluation.
+
+    :ivar item_id: Item identifier.
+    :vartype item_id: str
+    :ivar source_uri: Source uniform resource identifier.
+    :vartype source_uri: str
+    :ivar expected_text: Expected text from the dataset.
+    :vartype expected_text: str
+    :ivar extracted_text: Extracted text when available.
+    :vartype extracted_text: str or None
+    :ivar coverage_status: Coverage status (present, empty, missing).
+    :vartype coverage_status: str
+    :ivar extraction_status: Extraction status from the run (extracted, skipped, errored, missing).
+    :vartype extraction_status: str
+    :ivar similarity_score: Similarity score between expected and extracted text.
+    :vartype similarity_score: float
+    :ivar kind: Label kind from the dataset.
+    :vartype kind: str
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    item_id: str
+    source_uri: str
+    expected_text: str
+    extracted_text: Optional[str] = None
+    coverage_status: str
+    extraction_status: str
+    similarity_score: float
+    kind: str
+
+
+class ExtractionEvaluationResult(BaseModel):
+    """
+    Result bundle for an extraction evaluation.
+
+    :ivar dataset: Dataset metadata.
+    :vartype dataset: dict[str, object]
+    :ivar extractor_id: Extractor identifier.
+    :vartype extractor_id: str
+    :ivar run_id: Extraction run identifier.
+    :vartype run_id: str
+    :ivar recipe_id: Extraction recipe identifier.
+    :vartype recipe_id: str
+    :ivar recipe_name: Extraction recipe name.
+    :vartype recipe_name: str
+    :ivar evaluated_at: International Organization for Standardization 8601 timestamp.
+    :vartype evaluated_at: str
+    :ivar metrics: Evaluation metrics for coverage and accuracy.
+    :vartype metrics: dict[str, float]
+    :ivar items: Per-item evaluation reports.
+    :vartype items: list[ExtractionEvaluationItemReport]
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    dataset: Dict[str, object]
+    extractor_id: str
+    run_id: str
+    recipe_id: str
+    recipe_name: str
+    evaluated_at: str
+    metrics: Dict[str, float]
+    items: List[ExtractionEvaluationItemReport]
+
+
+def load_extraction_dataset(path: Path) -> ExtractionEvaluationDataset:
+    """
+    Load an extraction evaluation dataset from JavaScript Object Notation.
+
+    :param path: Path to the dataset file.
+    :type path: Path
+    :return: Parsed extraction evaluation dataset.
+    :rtype: ExtractionEvaluationDataset
+    """
+    try:
+        data = json.loads(path.read_text(encoding="utf-8"))
+    except json.JSONDecodeError as exc:
+        raise ValueError("Invalid extraction dataset") from exc
+    return ExtractionEvaluationDataset.model_validate(data)
+
+
+def evaluate_extraction_run(
+    *,
+    corpus: Corpus,
+    run: ExtractionRunManifest,
+    extractor_id: str,
+    dataset: ExtractionEvaluationDataset,
+) -> ExtractionEvaluationResult:
+    """
+    Evaluate an extraction run against a dataset.
+
+    :param corpus: Corpus associated with the run.
+    :type corpus: Corpus
+    :param run: Extraction run manifest.
+    :type run: ExtractionRunManifest
+    :param extractor_id: Extractor identifier for the run.
+    :type extractor_id: str
+    :param dataset: Extraction evaluation dataset.
+    :type dataset: ExtractionEvaluationDataset
+    :return: Extraction evaluation result bundle.
+    :rtype: ExtractionEvaluationResult
+    """
+    catalog = corpus.load_catalog()
+    item_index = {item.item_id: item for item in run.items}
+    coverage_present = 0
+    coverage_empty = 0
+    coverage_missing = 0
+    processable = 0
+    similarity_scores: List[float] = []
+    item_reports: List[ExtractionEvaluationItemReport] = []
+
+    for dataset_item in dataset.items:
+        item_id = _resolve_item_id(dataset_item, catalog_items=catalog.items)
+        catalog_item = catalog.items.get(item_id)
+        if catalog_item is None:
+            raise ValueError(f"Unknown item identifier: {item_id}")
+        extraction_item = item_index.get(item_id)
+        extraction_status = extraction_item.status if extraction_item else "missing"
+        if extraction_status != "errored" and extraction_status != "missing":
+            processable += 1
+
+        extracted_text = corpus.read_extracted_text(
+            extractor_id=extractor_id, run_id=run.run_id, item_id=item_id
+        )
+        coverage_status = _coverage_status(extracted_text)
+        if coverage_status == "present":
+            coverage_present += 1
+        elif coverage_status == "empty":
+            coverage_empty += 1
+        else:
+            coverage_missing += 1
+
+        similarity_score = _similarity_score(
+            expected_text=dataset_item.expected_text, extracted_text=extracted_text
+        )
+        similarity_scores.append(similarity_score)
+        item_reports.append(
+            ExtractionEvaluationItemReport(
+                item_id=item_id,
+                source_uri=catalog_item.source_uri,
+                expected_text=dataset_item.expected_text,
+                extracted_text=extracted_text,
+                coverage_status=coverage_status,
+                extraction_status=extraction_status,
+                similarity_score=similarity_score,
+                kind=dataset_item.kind,
+            )
+        )
+
+    total_items = max(len(dataset.items), 1)
+    average_similarity = sum(similarity_scores) / total_items if similarity_scores else 0.0
+    metrics = {
+        "coverage_present": float(coverage_present),
+        "coverage_empty": float(coverage_empty),
+        "coverage_missing": float(coverage_missing),
+        "processable_fraction": processable / total_items,
+        "average_similarity": average_similarity,
+    }
+    dataset_meta = {
+        "name": dataset.name,
+        "description": dataset.description,
+        "items": len(dataset.items),
+    }
+    return ExtractionEvaluationResult(
+        dataset=dataset_meta,
+        extractor_id=extractor_id,
+        run_id=run.run_id,
+        recipe_id=run.recipe.recipe_id,
+        recipe_name=run.recipe.name,
+        evaluated_at=utc_now_iso(),
+        metrics=metrics,
+        items=item_reports,
+    )
+
+
+def write_extraction_evaluation_result(
+    *, corpus: Corpus, run_id: str, result: ExtractionEvaluationResult
+) -> Path:
+    """
+    Persist extraction evaluation output under the corpus.
+
+    :param corpus: Corpus associated with the evaluation.
+    :type corpus: Corpus
+    :param run_id: Extraction run identifier.
+    :type run_id: str
+    :param result: Evaluation result to write.
+    :type result: ExtractionEvaluationResult
+    :return: Output path.
+    :rtype: Path
+    """
+    output_dir = corpus.runs_dir / "evaluation" / "extraction" / run_id
+    output_dir.mkdir(parents=True, exist_ok=True)
+    output_path = output_dir / "output.json"
+    output_path.write_text(result.model_dump_json(indent=2) + "\n", encoding="utf-8")
+    return output_path
+
+
+def _resolve_item_id(
+    dataset_item: ExtractionEvaluationItem, *, catalog_items: Dict[str, CatalogItem]
+) -> str:
+    if dataset_item.item_id:
+        return dataset_item.item_id
+    source_uri = dataset_item.source_uri
+    if not source_uri:
+        raise ValueError("Evaluation item is missing item_id and source_uri")
+    for item_id, catalog_item in catalog_items.items():
+        if getattr(catalog_item, "source_uri", None) == source_uri:
+            return item_id
+    raise ValueError(f"Unknown source uniform resource identifier: {source_uri}")
+
+
+def _coverage_status(extracted_text: Optional[str]) -> str:
+    if extracted_text is None:
+        return "missing"
+    if extracted_text.strip():
+        return "present"
+    return "empty"
+
+
+def _normalize_text(text: str) -> str:
+    return " ".join(text.lower().split())
+
+
+def _similarity_score(*, expected_text: str, extracted_text: Optional[str]) -> float:
+    if extracted_text is None:
+        return 0.0
+    expected = _normalize_text(expected_text)
+    actual = _normalize_text(extracted_text)
+    if not expected and not actual:
+        return 1.0
+    return SequenceMatcher(None, expected, actual).ratio()
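
A minimal end-to-end sketch of the module above, assuming a corpus with at least one completed extraction run; the dataset content, source URI, and file names are illustrative only.

```python
# Sketch only: evaluate an extraction run programmatically with the new module.
# The dataset content and source URI below are illustrative assumptions.
import json
from pathlib import Path

from biblicus.corpus import Corpus
from biblicus.extraction_evaluation import (
    evaluate_extraction_run,
    load_extraction_dataset,
    write_extraction_evaluation_result,
)

# Write a tiny dataset file (schema_version must match
# EXTRACTION_DATASET_SCHEMA_VERSION, currently 1).
dataset_path = Path("extraction_eval.json")
dataset_path.write_text(
    json.dumps(
        {
            "schema_version": 1,
            "name": "smoke",
            "items": [
                {
                    "source_uri": "file:///corpus/raw/a.txt",
                    "expected_text": "Hello world",
                }
            ],
        }
    ),
    encoding="utf-8",
)

corpus = Corpus.find(Path.cwd())
run_ref = corpus.latest_extraction_run_reference()
assert run_ref is not None, "requires at least one extraction run"
run = corpus.load_extraction_run_manifest(
    extractor_id=run_ref.extractor_id, run_id=run_ref.run_id
)
result = evaluate_extraction_run(
    corpus=corpus,
    run=run,
    extractor_id=run_ref.extractor_id,
    dataset=load_extraction_dataset(dataset_path),
)
output_path = write_extraction_evaluation_result(
    corpus=corpus, run_id=run.run_id, result=result
)
print(output_path, result.metrics["average_similarity"])
```

Coverage counts land in `metrics` as floats alongside `processable_fraction` and `average_similarity`, and the full per-item reports are persisted under the corpus runs directory at `evaluation/extraction/<run_id>/output.json`.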
{biblicus-0.12.0.dist-info → biblicus-0.13.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: biblicus
-Version: 0.12.0
+Version: 0.13.0
 Summary: Command line interface and Python library for corpus ingestion, retrieval, and evaluation.
 License: MIT
 Requires-Python: >=3.9
@@ -486,10 +486,11 @@ corpus/
 
 ## Retrieval backends
 
-Two backends are included.
+Three backends are included.
 
 - `scan` is a minimal baseline that scans raw items directly.
 - `sqlite-full-text-search` is a practical baseline that builds a full text search index in SQLite.
+- `vector` is a deterministic term-frequency vector baseline with cosine similarity scoring.
 
 For detailed documentation including configuration options, performance characteristics, and usage examples, see the [Backend Reference][backend-reference].
 
@@ -535,6 +536,9 @@ These extractors are built in. Optional ones require extra dependencies. See [te
 
 For detailed documentation on all extractors, see the [Extractor Reference][extractor-reference].
 
+For extraction evaluation workflows, dataset formats, and report interpretation, see
+`docs/EXTRACTION_EVALUATION.md`.
+
 ## Topic modeling analysis
 
 Biblicus can run analysis pipelines on extracted text without changing the raw corpus. Profiling and topic modeling
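
The `vector` backend added above is described as a deterministic term-frequency baseline scored with cosine similarity. A self-contained sketch of that standard computation, not the backend's actual implementation:

```python
# Sketch of term-frequency cosine similarity, the scoring scheme the README
# describes for the `vector` backend; not Biblicus's actual code.
import math
from collections import Counter
from typing import Dict


def term_frequencies(text: str) -> Dict[str, int]:
    """Count lowercased whitespace-delimited terms."""
    return Counter(text.lower().split())


def cosine_similarity(a: Dict[str, int], b: Dict[str, int]) -> float:
    """Cosine of the angle between two sparse term-frequency vectors."""
    dot = sum(count * b.get(term, 0) for term, count in a.items())
    norm_a = math.sqrt(sum(c * c for c in a.values()))
    norm_b = math.sqrt(sum(c * c for c in b.values()))
    if norm_a == 0.0 or norm_b == 0.0:
        return 0.0
    return dot / (norm_a * norm_b)


print(cosine_similarity(term_frequencies("hello world"), term_frequencies("hello there world")))
```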
{biblicus-0.12.0.dist-info → biblicus-0.13.0.dist-info}/RECORD RENAMED
@@ -1,7 +1,7 @@
-biblicus/__init__.py,sha256=okAXmTSud_hQzaGEURDqX95I66SlcvTERCrWbUZA5ko,496
+biblicus/__init__.py,sha256=pD55sYei6AGGLcN1AWnpUY6-zPIPq1WxOp-sexOOlT0,496
 biblicus/__main__.py,sha256=ipfkUoTlocVnrQDM69C7TeBqQxmHVeiWMRaT3G9rtnk,117
-biblicus/cli.py,sha256=bZV-ZxeWskRL4CFCGzyVpcaFC8KOb0xmxx3bnMqP-1I,36118
-biblicus/constants.py,sha256=-JaHI3Dngte2drawx93cGWxFVobbgIuaVhmjUJpf4GI,333
+biblicus/cli.py,sha256=cMoirLFPhTwftNuqaadajCcRUEz_FBaLkupjVxpAxO8,38403
+biblicus/constants.py,sha256=gAlEVJhxdFj-eWWJrlYbP7H1X3c5gwhrIBq9NQ1Vq_E,371
 biblicus/context.py,sha256=U7qkOwMdqNgYnqaC9hgQY0kv0R-6qcjV6bhXQl2WUkE,10215
 biblicus/corpus.py,sha256=qSDnYJXhWlF2p_BbFLl6xtI53lIIPxwyKLLGLC432Sg,55612
 biblicus/crawl.py,sha256=n8rXBMnziBK9vtKQQCXYOpBzqsPCswj2PzVJUb370KY,6250
@@ -9,6 +9,7 @@ biblicus/errors.py,sha256=uMajd5DvgnJ_-jq5sbeom1GV8DPUc-kojBaECFi6CsY,467
 biblicus/evaluation.py,sha256=5xWpb-8f49Osh9aHzo1ab3AXOmls3Imc5rdnEC0pN-8,8143
 biblicus/evidence_processing.py,sha256=sJe6T1nLxvU0xs9yMH8JZZS19zHXMR-Fpr5lWi5ndUM,6120
 biblicus/extraction.py,sha256=qvrsq6zSz2Kg-cap-18HPHC9pQlqEGo7pyID2uKCyBo,19760
+biblicus/extraction_evaluation.py,sha256=cBC2B1nQCtXmOcVWUhHyO2NJRX8QSDuqhVjEc8PXrOA,10400
 biblicus/frontmatter.py,sha256=JOGjIDzbbOkebQw2RzA-3WDVMAMtJta2INjS4e7-LMg,2463
 biblicus/hook_logging.py,sha256=IMvde-JhVWrx9tNz3eDJ1CY_rr5Sj7DZ2YNomYCZbz0,5366
 biblicus/hook_manager.py,sha256=ZCAkE5wLvn4lnQz8jho_o0HGEC9KdQd9qitkAEUQRcw,6997
@@ -57,9 +58,9 @@ biblicus/extractors/select_override.py,sha256=gSpffFmn1ux9pGtFvHD5Uu_LO8TmmJC4L_
 biblicus/extractors/select_smart_override.py,sha256=-sLMnNoeXbCB3dO9zflQq324eHuLbd6hpveSwduXP-U,6763
 biblicus/extractors/select_text.py,sha256=w0ATmDy3tWWbOObzW87jGZuHbgXllUhotX5XyySLs-o,3395
 biblicus/extractors/unstructured_text.py,sha256=l2S_wD_htu7ZHoJQNQtP-kGlEgOeKV_w2IzAC93lePE,3564
-biblicus-0.12.0.dist-info/licenses/LICENSE,sha256=lw44GXFG_Q0fS8m5VoEvv_xtdBXK26pBcbSPUCXee_Q,1078
-biblicus-0.12.0.dist-info/METADATA,sha256=fhWcCcczfuLn2mZ_Moqe2zMKJ1-Q7KxZtR_x9YaiFO8,27765
-biblicus-0.12.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-biblicus-0.12.0.dist-info/entry_points.txt,sha256=BZmO4H8Uz00fyi1RAFryOCGfZgX7eHWkY2NE-G54U5A,47
-biblicus-0.12.0.dist-info/top_level.txt,sha256=sUD_XVZwDxZ29-FBv1MknTGh4mgDXznGuP28KJY_WKc,9
-biblicus-0.12.0.dist-info/RECORD,,
+biblicus-0.13.0.dist-info/licenses/LICENSE,sha256=lw44GXFG_Q0fS8m5VoEvv_xtdBXK26pBcbSPUCXee_Q,1078
+biblicus-0.13.0.dist-info/METADATA,sha256=Ae0gttdvOggyE1vQVab4IOSmbx-JklxzvBZJ_3UyxIA,27979
+biblicus-0.13.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+biblicus-0.13.0.dist-info/entry_points.txt,sha256=BZmO4H8Uz00fyi1RAFryOCGfZgX7eHWkY2NE-G54U5A,47
+biblicus-0.13.0.dist-info/top_level.txt,sha256=sUD_XVZwDxZ29-FBv1MknTGh4mgDXznGuP28KJY_WKc,9
+biblicus-0.13.0.dist-info/RECORD,,
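
Each RECORD entry above has the wheel-standard form `path,sha256=<digest>,<size>`, where the digest is the unpadded urlsafe base64 encoding of the file's SHA-256 hash. A small sketch that reproduces an entry for any file on disk (the path is a hypothetical example):

```python
# Sketch: compute a wheel RECORD line (path, urlsafe-base64 SHA-256 without
# padding, size in bytes) for a file; the path below is a hypothetical example.
import base64
import hashlib
from pathlib import Path


def record_entry(path: Path) -> str:
    data = path.read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    return f"{path.as_posix()},sha256={digest.rstrip(b'=').decode('ascii')},{len(data)}"


print(record_entry(Path("biblicus/__init__.py")))
```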