biblicus 0.3.0-py3-none-any.whl → 0.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
biblicus/crawl.py ADDED
@@ -0,0 +1,186 @@
+"""
+Website crawl utilities for Biblicus corpora.
+"""
+
+from __future__ import annotations
+
+from collections import deque
+from html.parser import HTMLParser
+from typing import Deque, List, Optional, Set
+from urllib.parse import urldefrag, urljoin
+
+from pydantic import BaseModel, ConfigDict, Field
+
+from .ignore import load_corpus_ignore_spec
+from .sources import load_source
+
+
+class CrawlRequest(BaseModel):
+    """
+    Request describing a website crawl into a corpus.
+
+    :ivar root_url: Initial uniform resource locator to fetch.
+    :vartype root_url: str
+    :ivar allowed_prefix: Uniform resource locator prefix that limits which links are eligible for crawl.
+    :vartype allowed_prefix: str
+    :ivar max_items: Maximum number of items to store during the crawl.
+    :vartype max_items: int
+    :ivar tags: Tags to apply to stored items.
+    :vartype tags: list[str]
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    root_url: str = Field(min_length=1)
+    allowed_prefix: str = Field(min_length=1)
+    max_items: int = Field(default=50, ge=1)
+    tags: List[str] = Field(default_factory=list)
+
+
+class CrawlResult(BaseModel):
+    """
+    Summary result for a crawl execution.
+
+    :ivar crawl_id: Crawl identifier used in the corpus raw import namespace.
+    :vartype crawl_id: str
+    :ivar discovered_items: Total number of distinct uniform resource locators discovered.
+    :vartype discovered_items: int
+    :ivar fetched_items: Number of eligible items fetched over hypertext transfer protocol.
+    :vartype fetched_items: int
+    :ivar stored_items: Number of items stored into the corpus.
+    :vartype stored_items: int
+    :ivar skipped_outside_prefix_items: Number of discovered items outside the allowed prefix.
+    :vartype skipped_outside_prefix_items: int
+    :ivar skipped_ignored_items: Number of eligible items skipped due to corpus ignore rules.
+    :vartype skipped_ignored_items: int
+    :ivar errored_items: Number of eligible items that failed to fetch or store.
+    :vartype errored_items: int
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    crawl_id: str
+    discovered_items: int = Field(default=0, ge=0)
+    fetched_items: int = Field(default=0, ge=0)
+    stored_items: int = Field(default=0, ge=0)
+    skipped_outside_prefix_items: int = Field(default=0, ge=0)
+    skipped_ignored_items: int = Field(default=0, ge=0)
+    errored_items: int = Field(default=0, ge=0)
+
+
+class _LinkExtractor(HTMLParser):
+    def __init__(self) -> None:
+        super().__init__()
+        self.links: List[str] = []
+
+    def handle_starttag(self, tag: str, attrs):  # type: ignore[no-untyped-def]
+        _ = tag
+        for key, value in attrs:
+            if key in {"href", "src"} and isinstance(value, str) and value.strip():
+                self.links.append(value.strip())
+
+
+def _normalize_crawl_url(candidate: str, *, base_url: str) -> Optional[str]:
+    joined = urljoin(base_url, candidate)
+    joined, _fragment = urldefrag(joined)
+    joined = joined.strip()
+    if joined.startswith(("mailto:", "javascript:")):
+        return None
+    return joined
+
+
+def _crawl_relative_path(url: str, *, allowed_prefix: str) -> str:
+    relative = url[len(allowed_prefix) :].lstrip("/")
+    if not relative or relative.endswith("/"):
+        relative = relative.rstrip("/") + "/index.html" if relative else "index.html"
+    return relative
+
+
+def _should_parse_links(media_type: str) -> bool:
+    return media_type.startswith("text/html")
+
+
+def _discover_links(html_text: str, *, base_url: str) -> List[str]:
+    parser = _LinkExtractor()
+    parser.feed(html_text)
+    discovered: List[str] = []
+    for raw in parser.links:
+        normalized = _normalize_crawl_url(raw, base_url=base_url)
+        if normalized is not None:
+            discovered.append(normalized)
+    return discovered
+
+
+def crawl_into_corpus(*, corpus, request: CrawlRequest) -> CrawlResult:  # type: ignore[no-untyped-def]
+    """
+    Crawl a website prefix into a corpus.
+
+    :param corpus: Target corpus to receive crawled items.
+    :type corpus: biblicus.corpus.Corpus
+    :param request: Crawl request describing limits and allowed prefix.
+    :type request: CrawlRequest
+    :return: Crawl result summary.
+    :rtype: CrawlResult
+    """
+    ignore_spec = load_corpus_ignore_spec(corpus.root)
+    allowed_prefix = request.allowed_prefix
+    root_url = request.root_url
+
+    crawl_id = corpus.create_crawl_id()
+
+    queue: Deque[str] = deque([root_url])
+    seen: Set[str] = set()
+    stored_count = 0
+    fetched_count = 0
+    skipped_outside_prefix_count = 0
+    skipped_ignored_count = 0
+    errored_count = 0
+    discovered_urls: Set[str] = set()
+
+    while queue and stored_count < request.max_items:
+        url = queue.popleft()
+        if url in seen:
+            continue
+        seen.add(url)
+        discovered_urls.add(url)
+
+        if not url.startswith(allowed_prefix):
+            skipped_outside_prefix_count += 1
+            continue
+
+        relative_path = _crawl_relative_path(url, allowed_prefix=allowed_prefix)
+        if ignore_spec.matches(relative_path):
+            skipped_ignored_count += 1
+            continue
+
+        try:
+            payload = load_source(url)
+            fetched_count += 1
+            corpus.ingest_crawled_payload(
+                crawl_id=crawl_id,
+                relative_path=relative_path,
+                data=payload.data,
+                filename=payload.filename,
+                media_type=payload.media_type,
+                source_uri=payload.source_uri,
+                tags=request.tags,
+            )
+            stored_count += 1
+        except Exception:
+            errored_count += 1
+            continue
+
+        if _should_parse_links(payload.media_type):
+            text = payload.data.decode("utf-8", errors="replace")
+            for discovered in _discover_links(text, base_url=url):
+                queue.append(discovered)
+
+    return CrawlResult(
+        crawl_id=crawl_id,
+        discovered_items=len(discovered_urls),
+        fetched_items=fetched_count,
+        stored_items=stored_count,
+        skipped_outside_prefix_items=skipped_outside_prefix_count,
+        skipped_ignored_items=skipped_ignored_count,
+        errored_items=errored_count,
+    )
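
For orientation, a minimal usage sketch of the new crawl module follows. Only CrawlRequest, CrawlResult, and crawl_into_corpus are defined in this diff; the Corpus constructor shown below is an assumption (the module path comes from the docstring above).

from biblicus.corpus import Corpus  # module named in the docstring; constructor call is assumed
from biblicus.crawl import CrawlRequest, crawl_into_corpus

corpus = Corpus("./my-corpus")  # hypothetical: how a corpus is opened is not shown in this diff
request = CrawlRequest(
    root_url="https://docs.example.com/guide/",
    allowed_prefix="https://docs.example.com/guide/",
    max_items=25,
    tags=["docs"],
)
result = crawl_into_corpus(corpus=corpus, request=request)
print(result.stored_items, result.skipped_ignored_items, result.errored_items)

Note that links are only enqueued from responses whose media type starts with text/html, so binary assets under the prefix are stored but never expanded further.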
@@ -0,0 +1,201 @@
+"""
+Evidence processing stages for Biblicus.
+
+Retrieval backends return ranked evidence. Additional stages can be applied without changing the
+backend implementation:
+
+- Rerank: reorder evidence.
+- Filter: remove evidence.
+
+These stages are explicit so they can be configured, tested, and evaluated independently from the
+retrieval backend.
+"""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List
+
+from pydantic import BaseModel, ConfigDict, Field
+
+from .models import Evidence
+
+
+class EvidenceReranker(ABC):
+    """
+    Evidence reranker interface.
+
+    :param reranker_id: Stable identifier for this reranker implementation.
+    :type reranker_id: str
+    """
+
+    reranker_id: str
+
+    @abstractmethod
+    def rerank(self, *, query_text: str, evidence: List[Evidence]) -> List[Evidence]:
+        """
+        Reorder evidence for the given query.
+
+        :param query_text: Query text associated with the evidence.
+        :type query_text: str
+        :param evidence: Evidence objects to rerank.
+        :type evidence: list[Evidence]
+        :return: Reranked evidence list.
+        :rtype: list[Evidence]
+        """
+
+
+class EvidenceFilter(ABC):
+    """
+    Evidence filter interface.
+
+    :param filter_id: Stable identifier for this filter implementation.
+    :type filter_id: str
+    """
+
+    filter_id: str
+
+    @abstractmethod
+    def filter(
+        self, *, query_text: str, evidence: List[Evidence], config: Dict[str, Any]
+    ) -> List[Evidence]:
+        """
+        Filter evidence for the given query.
+
+        :param query_text: Query text associated with the evidence.
+        :type query_text: str
+        :param evidence: Evidence objects to filter.
+        :type evidence: list[Evidence]
+        :param config: Filter-specific configuration values.
+        :type config: dict[str, Any]
+        :return: Filtered evidence list.
+        :rtype: list[Evidence]
+        """
+
+
+class EvidenceRerankLongestText(EvidenceReranker):
+    """
+    Reranker that prioritizes evidence with longer text.
+
+    This is a deterministic policy that is useful when a downstream context pack is limited by a
+    character or token budget and longer evidence is preferred.
+
+    :ivar reranker_id: Stable reranker identifier.
+    :vartype reranker_id: str
+    """
+
+    reranker_id = "rerank-longest-text"
+
+    def rerank(self, *, query_text: str, evidence: List[Evidence]) -> List[Evidence]:
+        """
+        Reorder evidence by descending text length.
+
+        :param query_text: Query text associated with the evidence.
+        :type query_text: str
+        :param evidence: Evidence objects to rerank.
+        :type evidence: list[Evidence]
+        :return: Evidence list ordered by text length.
+        :rtype: list[Evidence]
+        """
+        return sorted(
+            evidence,
+            key=lambda evidence_item: (-len((evidence_item.text or "").strip()), evidence_item.item_id),
+        )
+
+
+class EvidenceFilterMinimumScoreConfig(BaseModel):
+    """
+    Configuration for the minimum score evidence filter.
+
+    :ivar minimum_score: Evidence with score below this threshold is removed.
+    :vartype minimum_score: float
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    minimum_score: float = Field(ge=0.0)
+
+
+class EvidenceFilterMinimumScore(EvidenceFilter):
+    """
+    Filter that removes evidence below a minimum score threshold.
+
+    :ivar filter_id: Stable filter identifier.
+    :vartype filter_id: str
+    """
+
+    filter_id = "filter-minimum-score"
+
+    def filter(
+        self, *, query_text: str, evidence: List[Evidence], config: Dict[str, Any]
+    ) -> List[Evidence]:
+        """
+        Filter evidence by score threshold.
+
+        :param query_text: Query text associated with the evidence.
+        :type query_text: str
+        :param evidence: Evidence objects to filter.
+        :type evidence: list[Evidence]
+        :param config: Filter configuration values.
+        :type config: dict[str, Any]
+        :return: Evidence list with low-score items removed.
+        :rtype: list[Evidence]
+        """
+        parsed_config = EvidenceFilterMinimumScoreConfig.model_validate(config)
+        return [
+            evidence_item
+            for evidence_item in evidence
+            if float(evidence_item.score) >= parsed_config.minimum_score
+        ]
+
+
+_EVIDENCE_RERANKERS: Dict[str, EvidenceReranker] = {
+    EvidenceRerankLongestText.reranker_id: EvidenceRerankLongestText(),
+}
+
+_EVIDENCE_FILTERS: Dict[str, EvidenceFilter] = {
+    EvidenceFilterMinimumScore.filter_id: EvidenceFilterMinimumScore(),
+}
+
+
+def apply_evidence_reranker(
+    *, reranker_id: str, query_text: str, evidence: List[Evidence]
+) -> List[Evidence]:
+    """
+    Apply a reranker to evidence by identifier.
+
+    :param reranker_id: Reranker identifier.
+    :type reranker_id: str
+    :param query_text: Query text associated with the evidence.
+    :type query_text: str
+    :param evidence: Evidence objects to rerank.
+    :type evidence: list[Evidence]
+    :return: Reranked evidence list.
+    :rtype: list[Evidence]
+    :raises KeyError: If the reranker identifier is unknown.
+    """
+    reranker = _EVIDENCE_RERANKERS[reranker_id]
+    return reranker.rerank(query_text=query_text, evidence=evidence)
+
+
+def apply_evidence_filter(
+    *, filter_id: str, query_text: str, evidence: List[Evidence], config: Dict[str, Any]
+) -> List[Evidence]:
+    """
+    Apply a filter to evidence by identifier.
+
+    :param filter_id: Filter identifier.
+    :type filter_id: str
+    :param query_text: Query text associated with the evidence.
+    :type query_text: str
+    :param evidence: Evidence objects to filter.
+    :type evidence: list[Evidence]
+    :param config: Filter-specific configuration values.
+    :type config: dict[str, Any]
+    :return: Filtered evidence list.
+    :rtype: list[Evidence]
+    :raises KeyError: If the filter identifier is unknown.
+    """
+    evidence_filter = _EVIDENCE_FILTERS[filter_id]
+    return evidence_filter.filter(query_text=query_text, evidence=evidence, config=config)
+
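
A short pipeline sketch for the new evidence stages. The file name of this added module is not shown in the hunk, so the import path below is an assumption, as are the Evidence constructor arguments (the stages only read item_id, text, and score).

from biblicus.models import Evidence
from biblicus.stages import apply_evidence_filter, apply_evidence_reranker  # module path assumed

evidence = [
    Evidence(item_id="a", text="short snippet", score=0.9),
    Evidence(item_id="b", text="a much longer supporting passage of text", score=0.4),
]
kept = apply_evidence_filter(
    filter_id="filter-minimum-score",
    query_text="example query",
    evidence=evidence,
    config={"minimum_score": 0.5},
)
ordered = apply_evidence_reranker(
    reranker_id="rerank-longest-text",
    query_text="example query",
    evidence=kept,
)

Unknown identifiers raise KeyError, as documented on both apply_* helpers, so stage names can be validated simply by attempting the lookup.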
biblicus/extraction.py CHANGED
@@ -7,7 +7,6 @@ from __future__ import annotations
 import json
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple
-from uuid import uuid4
 
 from pydantic import BaseModel, ConfigDict, Field
 
@@ -196,8 +195,9 @@ def create_extraction_run_manifest(
     :rtype: ExtractionRunManifest
     """
     catalog = corpus.load_catalog()
+    run_id = hash_text(f"{recipe.recipe_id}:{catalog.generated_at}")
     return ExtractionRunManifest(
-        run_id=str(uuid4()),
+        run_id=run_id,
         recipe=recipe,
        corpus_uri=corpus.uri,
         catalog_generated_at=catalog.generated_at,
@@ -341,6 +341,8 @@ def build_extraction_run(
     )
     manifest = create_extraction_run_manifest(corpus, recipe=recipe)
     run_dir = corpus.extraction_run_dir(extractor_id=extractor_id, run_id=manifest.run_id)
+    if run_dir.exists():
+        return corpus.load_extraction_run_manifest(extractor_id=extractor_id, run_id=manifest.run_id)
     run_dir.mkdir(parents=True, exist_ok=False)
 
     catalog = corpus.load_catalog()
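
The effect of this change is that extraction runs become idempotent for a given recipe and catalog snapshot: the run identifier is now a hash of the recipe identifier and the catalog timestamp rather than a random UUID, and an existing run directory is returned instead of being rebuilt. A rough sketch of the expected behaviour, assuming build_extraction_run returns the run manifest and accepts the keyword arguments shown (its full signature is not visible in this hunk):

first = build_extraction_run(corpus=corpus, recipe=recipe, extractor_id=extractor_id)
second = build_extraction_run(corpus=corpus, recipe=recipe, extractor_id=extractor_id)
assert first.run_id == second.run_id  # same recipe + unchanged catalog => existing run is reused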
biblicus/models.py CHANGED
@@ -189,6 +189,37 @@ def parse_extraction_run_reference(value: str) -> ExtractionRunReference:
     return ExtractionRunReference(extractor_id=extractor_id, run_id=run_id)
 
 
+class ExtractionRunListEntry(BaseModel):
+    """
+    Summary entry for an extraction run stored in a corpus.
+
+    :ivar extractor_id: Extractor plugin identifier.
+    :vartype extractor_id: str
+    :ivar run_id: Extraction run identifier.
+    :vartype run_id: str
+    :ivar recipe_id: Deterministic recipe identifier.
+    :vartype recipe_id: str
+    :ivar recipe_name: Human-readable recipe name.
+    :vartype recipe_name: str
+    :ivar catalog_generated_at: Catalog timestamp used for the run.
+    :vartype catalog_generated_at: str
+    :ivar created_at: International Organization for Standardization 8601 timestamp for run creation.
+    :vartype created_at: str
+    :ivar stats: Run statistics.
+    :vartype stats: dict[str, object]
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    extractor_id: str = Field(min_length=1)
+    run_id: str = Field(min_length=1)
+    recipe_id: str = Field(min_length=1)
+    recipe_name: str = Field(min_length=1)
+    catalog_generated_at: str = Field(min_length=1)
+    created_at: str = Field(min_length=1)
+    stats: Dict[str, object] = Field(default_factory=dict)
+
+
 class QueryBudget(BaseModel):
     """
     Evidence selection budget for retrieval.
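
An illustrative instance of the new summary model; all field values below are invented for the example and the model forbids extra fields.

entry = ExtractionRunListEntry(
    extractor_id="plain-text",
    run_id="9f2c1ab4",
    recipe_id="c0ffee12",
    recipe_name="Plain text extraction",
    catalog_generated_at="2024-01-01T00:00:00+00:00",
    created_at="2024-01-01T00:00:05.000000+00:00",
    stats={"items": 12, "errors": 0},
)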
biblicus/time.py CHANGED
@@ -14,4 +14,4 @@ def utc_now_iso() -> str:
     :return: Current Coordinated Universal Time timestamp in International Organization for Standardization 8601 format.
     :rtype: str
     """
-    return datetime.now(timezone.utc).replace(microsecond=0).isoformat()
+    return datetime.now(timezone.utc).isoformat(timespec="microseconds")
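
The practical difference is that timestamps now always carry a microsecond component instead of being truncated to whole seconds. For example:

from datetime import datetime, timezone

datetime.now(timezone.utc).replace(microsecond=0).isoformat()     # 0.3.0 style: '2024-01-01T12:00:00+00:00'
datetime.now(timezone.utc).isoformat(timespec="microseconds")     # 0.5.0 style: '2024-01-01T12:00:00.123456+00:00'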