biblicus 0.9.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
biblicus/__init__.py CHANGED
@@ -27,4 +27,4 @@ __all__ = [
27
27
  "RetrievalRun",
28
28
  ]
29
29
 
30
- __version__ = "0.9.0"
30
+ __version__ = "0.11.0"
@@ -11,4 +11,3 @@ from .interpolation import interpolate_env_vars
11
11
  from .loader import ConfigLoader, load_config
12
12
 
13
13
  __all__ = ["ConfigLoader", "interpolate_env_vars", "load_config"]
14
-
@@ -60,4 +60,3 @@ def _interpolate_string(text: str) -> str:
60
60
  raise ValueError(f"Required environment variable '{env_var}' not found")
61
61
 
62
62
  return re.sub(pattern, replace_match, text)
63
-
@@ -178,4 +178,3 @@ class ConfigLoader:
178
178
  for key, value in flat_config.items():
179
179
  if override or key not in os.environ:
180
180
  os.environ[key] = value
181
-
@@ -132,4 +132,3 @@ def convert_string_to_value(value: str) -> Any:
132
132
  return json.loads(value)
133
133
  except (json.JSONDecodeError, ValueError):
134
134
  return value
135
-
@@ -7,6 +7,7 @@ from __future__ import annotations
7
7
  from typing import Dict, Type
8
8
 
9
9
  from .base import CorpusAnalysisBackend
10
+ from .profiling import ProfilingBackend
10
11
  from .topic_modeling import TopicModelingBackend
11
12
 
12
13
 
@@ -18,6 +19,7 @@ def available_analysis_backends() -> Dict[str, Type[CorpusAnalysisBackend]]:
18
19
  :rtype: dict[str, Type[CorpusAnalysisBackend]]
19
20
  """
20
21
  return {
22
+ ProfilingBackend.analysis_id: ProfilingBackend,
21
23
  TopicModelingBackend.analysis_id: TopicModelingBackend,
22
24
  }
23
25
 
@@ -84,6 +84,221 @@ class AnalysisRunManifest(AnalysisSchemaModel):
84
84
  stats: Dict[str, Any] = Field(default_factory=dict)
85
85
 
86
86
 
87
class ProfilingRecipeConfig(AnalysisSchemaModel):
    """
    Recipe configuration for profiling analysis.

    :ivar schema_version: Analysis schema version; must equal ANALYSIS_SCHEMA_VERSION.
    :vartype schema_version: int
    :ivar sample_size: Optional sample size for distribution metrics.
    :vartype sample_size: int or None
    :ivar min_text_characters: Optional minimum character count for extracted text inclusion.
    :vartype min_text_characters: int or None
    :ivar percentiles: Percentiles to compute for distributions (ascending, within 1-100).
    :vartype percentiles: list[int]
    :ivar top_tag_count: Maximum number of tags to include in top tag output.
    :vartype top_tag_count: int
    :ivar tag_filters: Optional tag filters to limit tag coverage metrics.
    :vartype tag_filters: list[str] or None
    """

    schema_version: int = Field(default=ANALYSIS_SCHEMA_VERSION, ge=1)
    sample_size: Optional[int] = Field(default=None, ge=1)
    min_text_characters: Optional[int] = Field(default=None, ge=1)
    percentiles: List[int] = Field(default_factory=lambda: [50, 90, 99])
    top_tag_count: int = Field(default=10, ge=1)
    tag_filters: Optional[List[str]] = None

    @model_validator(mode="after")
    def _validate_schema_version(self) -> "ProfilingRecipeConfig":
        # Only the current schema generation is accepted.
        if self.schema_version != ANALYSIS_SCHEMA_VERSION:
            raise ValueError(f"Unsupported analysis schema version: {self.schema_version}")
        return self

    @field_validator("percentiles", mode="after")
    @classmethod
    def _validate_percentiles(cls, value: List[int]) -> List[int]:
        # Require a non-empty list, each entry in 1-100, in ascending order
        # (duplicates are tolerated, matching a non-strict sort comparison).
        if not value:
            raise ValueError("percentiles must include at least one value")
        if not all(1 <= entry <= 100 for entry in value):
            raise ValueError("percentiles must be between 1 and 100")
        if any(left > right for left, right in zip(value, value[1:])):
            raise ValueError("percentiles must be sorted in ascending order")
        return value

    @field_validator("tag_filters", mode="before")
    @classmethod
    def _validate_tag_filters(cls, value: object) -> object:
        # None disables filtering entirely; otherwise coerce every entry to a
        # stripped string and reject blank entries or an empty list.
        if value is None:
            return None
        if not isinstance(value, list):
            raise ValueError("tag_filters must be a list of strings")
        cleaned: List[str] = []
        for raw_tag in value:
            cleaned.append(str(raw_tag).strip())
        if not cleaned or any(not tag for tag in cleaned):
            raise ValueError("tag_filters must be a list of non-empty strings")
        return cleaned
140
+
141
+
142
class ProfilingPercentileValue(AnalysisSchemaModel):
    """
    Single percentile entry within a distribution summary.

    :ivar percentile: Percentile rank, constrained to 1-100 inclusive.
    :vartype percentile: int
    :ivar value: Value observed at that percentile rank.
    :vartype value: float
    """

    percentile: int = Field(ge=1, le=100)
    value: float
154
+
155
+
156
class ProfilingDistributionReport(AnalysisSchemaModel):
    """
    Distribution summary for a collection of numeric values.

    :ivar count: Number of values included in the summary (non-negative).
    :vartype count: int
    :ivar min_value: Minimum value observed.
    :vartype min_value: float
    :ivar max_value: Maximum value observed.
    :vartype max_value: float
    :ivar mean_value: Arithmetic mean of the observed values.
    :vartype mean_value: float
    :ivar percentiles: Percentile entries, one per requested percentile rank.
    :vartype percentiles: list[ProfilingPercentileValue]
    """

    count: int = Field(ge=0)
    min_value: float
    max_value: float
    mean_value: float
    percentiles: List[ProfilingPercentileValue] = Field(default_factory=list)
177
+
178
+
179
class ProfilingTagCount(AnalysisSchemaModel):
    """
    Single tag frequency entry in the profiling output.

    :ivar tag: Tag name.
    :vartype tag: str
    :ivar count: Number of items carrying this tag (non-negative).
    :vartype count: int
    """

    tag: str
    count: int = Field(ge=0)
191
+
192
+
193
class ProfilingTagReport(AnalysisSchemaModel):
    """
    Tag coverage summary for raw catalog items.

    :ivar tagged_items: Count of items that have at least one (matching) tag.
    :vartype tagged_items: int
    :ivar untagged_items: Count of items without any (matching) tag.
    :vartype untagged_items: int
    :ivar total_unique_tags: Count of distinct tags observed.
    :vartype total_unique_tags: int
    :ivar top_tags: Most frequent tags, capped by the recipe's top_tag_count.
    :vartype top_tags: list[ProfilingTagCount]
    :ivar tag_filters: Tag filters that were applied, or None when unfiltered.
    :vartype tag_filters: list[str] or None
    """

    tagged_items: int = Field(ge=0)
    untagged_items: int = Field(ge=0)
    total_unique_tags: int = Field(ge=0)
    top_tags: List[ProfilingTagCount] = Field(default_factory=list)
    tag_filters: Optional[List[str]] = None
214
+
215
+
216
class ProfilingRawItemsReport(AnalysisSchemaModel):
    """
    Summary of raw corpus items.

    :ivar total_items: Total number of catalog items (non-negative).
    :vartype total_items: int
    :ivar media_type_counts: Count of items per media type.
    :vartype media_type_counts: dict[str, int]
    :ivar bytes_distribution: Distribution of raw item sizes in bytes.
    :vartype bytes_distribution: ProfilingDistributionReport
    :ivar tags: Tag coverage summary over the same items.
    :vartype tags: ProfilingTagReport
    """

    total_items: int = Field(ge=0)
    media_type_counts: Dict[str, int] = Field(default_factory=dict)
    bytes_distribution: ProfilingDistributionReport
    tags: ProfilingTagReport
234
+
235
+
236
class ProfilingExtractedTextReport(AnalysisSchemaModel):
    """
    Summary of extracted text coverage for one extraction run.

    :ivar source_items: Count of source items in the extraction run.
    :vartype source_items: int
    :ivar extracted_nonempty_items: Count of extracted items with non-empty text.
    :vartype extracted_nonempty_items: int
    :ivar extracted_empty_items: Count of extracted items whose text is empty
        (or, per producer, below a configured minimum length).
    :vartype extracted_empty_items: int
    :ivar extracted_missing_items: Count of items with no extracted text artifact.
    :vartype extracted_missing_items: int
    :ivar characters_distribution: Distribution of extracted text lengths in characters.
    :vartype characters_distribution: ProfilingDistributionReport
    """

    source_items: int = Field(ge=0)
    extracted_nonempty_items: int = Field(ge=0)
    extracted_empty_items: int = Field(ge=0)
    extracted_missing_items: int = Field(ge=0)
    characters_distribution: ProfilingDistributionReport
257
+
258
+
259
class ProfilingReport(AnalysisSchemaModel):
    """
    Top-level report for a profiling analysis run.

    :ivar raw_items: Raw corpus item summary.
    :vartype raw_items: ProfilingRawItemsReport
    :ivar extracted_text: Extracted text coverage summary.
    :vartype extracted_text: ProfilingExtractedTextReport
    :ivar warnings: Warning messages collected during the run.
    :vartype warnings: list[str]
    :ivar errors: Error messages collected during the run.
    :vartype errors: list[str]
    """

    raw_items: ProfilingRawItemsReport
    extracted_text: ProfilingExtractedTextReport
    warnings: List[str] = Field(default_factory=list)
    errors: List[str] = Field(default_factory=list)
277
+
278
+
279
class ProfilingOutput(AnalysisSchemaModel):
    """
    Output bundle for profiling analysis.

    :ivar schema_version: Analysis schema version.
    :vartype schema_version: int
    :ivar analysis_id: Analysis backend identifier.
    :vartype analysis_id: str
    :ivar generated_at: ISO 8601 timestamp recording when the output was created.
    :vartype generated_at: str
    :ivar run: Analysis run manifest.
    :vartype run: AnalysisRunManifest
    :ivar report: Profiling report data.
    :vartype report: ProfilingReport
    """

    schema_version: int = Field(default=ANALYSIS_SCHEMA_VERSION, ge=1)
    analysis_id: str
    generated_at: str
    run: AnalysisRunManifest
    report: ProfilingReport
300
+
301
+
87
302
  class TopicModelingTextSourceConfig(AnalysisSchemaModel):
88
303
  """
89
304
  Configuration for text collection within topic modeling.
@@ -124,7 +339,9 @@ class TopicModelingLlmExtractionConfig(AnalysisSchemaModel):
124
339
  """
125
340
 
126
341
  enabled: bool = Field(default=False)
127
- method: TopicModelingLlmExtractionMethod = Field(default=TopicModelingLlmExtractionMethod.SINGLE)
342
+ method: TopicModelingLlmExtractionMethod = Field(
343
+ default=TopicModelingLlmExtractionMethod.SINGLE
344
+ )
128
345
  client: Optional[LlmClientConfig] = None
129
346
  prompt_template: Optional[str] = None
130
347
  system_prompt: Optional[str] = None
@@ -136,7 +353,9 @@ class TopicModelingLlmExtractionConfig(AnalysisSchemaModel):
136
353
  return value
137
354
  if isinstance(value, str):
138
355
  return TopicModelingLlmExtractionMethod(value)
139
- raise ValueError("llm_extraction.method must be a string or TopicModelingLlmExtractionMethod")
356
+ raise ValueError(
357
+ "llm_extraction.method must be a string or TopicModelingLlmExtractionMethod"
358
+ )
140
359
 
141
360
  @model_validator(mode="after")
142
361
  def _validate_requirements(self) -> "TopicModelingLlmExtractionConfig":
@@ -188,7 +407,9 @@ class TopicModelingVectorizerConfig(AnalysisSchemaModel):
188
407
  def _validate_ngram_range(self) -> "TopicModelingVectorizerConfig":
189
408
  start, end = self.ngram_range
190
409
  if start < 1 or end < start:
191
- raise ValueError("vectorizer.ngram_range must include two integers with start >= 1 and end >= start")
410
+ raise ValueError(
411
+ "vectorizer.ngram_range must include two integers with start >= 1 and end >= start"
412
+ )
192
413
  return self
193
414
 
194
415
  @field_validator("stop_words", mode="before")
@@ -201,7 +422,7 @@ class TopicModelingVectorizerConfig(AnalysisSchemaModel):
201
422
  raise ValueError("vectorizer.stop_words must be 'english' or a list of strings")
202
423
  return value
203
424
  if isinstance(value, list):
204
- if not all(isinstance(entry, str) and entry for entry in value):
425
+ if not value or not all(isinstance(entry, str) and entry for entry in value):
205
426
  raise ValueError("vectorizer.stop_words must be 'english' or a list of strings")
206
427
  return value
207
428
  raise ValueError("vectorizer.stop_words must be 'english' or a list of strings")
@@ -280,7 +501,9 @@ class TopicModelingRecipeConfig(AnalysisSchemaModel):
280
501
  """
281
502
 
282
503
  schema_version: int = Field(default=ANALYSIS_SCHEMA_VERSION, ge=1)
283
- text_source: TopicModelingTextSourceConfig = Field(default_factory=TopicModelingTextSourceConfig)
504
+ text_source: TopicModelingTextSourceConfig = Field(
505
+ default_factory=TopicModelingTextSourceConfig
506
+ )
284
507
  llm_extraction: TopicModelingLlmExtractionConfig = Field(
285
508
  default_factory=TopicModelingLlmExtractionConfig
286
509
  )
@@ -0,0 +1,337 @@
1
+ """
2
+ Profiling analysis backend for Biblicus.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import json
8
+ import math
9
+ from pathlib import Path
10
+ from typing import Dict, Iterable, List, Sequence
11
+
12
+ from pydantic import BaseModel
13
+
14
+ from ..corpus import Corpus
15
+ from ..models import CatalogItem, ExtractionRunReference
16
+ from ..retrieval import hash_text
17
+ from ..time import utc_now_iso
18
+ from .base import CorpusAnalysisBackend
19
+ from .models import (
20
+ AnalysisRecipeManifest,
21
+ AnalysisRunInput,
22
+ AnalysisRunManifest,
23
+ ProfilingDistributionReport,
24
+ ProfilingExtractedTextReport,
25
+ ProfilingOutput,
26
+ ProfilingPercentileValue,
27
+ ProfilingRawItemsReport,
28
+ ProfilingRecipeConfig,
29
+ ProfilingReport,
30
+ ProfilingTagCount,
31
+ ProfilingTagReport,
32
+ )
33
+
34
+
35
class ProfilingBackend(CorpusAnalysisBackend):
    """
    Corpus analysis backend that profiles corpus composition and coverage.

    :ivar analysis_id: Backend identifier.
    :vartype analysis_id: str
    """

    analysis_id = "profiling"

    def run_analysis(
        self,
        corpus: Corpus,
        *,
        recipe_name: str,
        config: Dict[str, object],
        extraction_run: ExtractionRunReference,
    ) -> BaseModel:
        """
        Run the profiling analysis pipeline.

        :param corpus: Corpus to analyze.
        :type corpus: Corpus
        :param recipe_name: Human-readable recipe name.
        :type recipe_name: str
        :param config: Analysis configuration values (mapping or already-parsed model).
        :type config: dict[str, object]
        :param extraction_run: Extraction run reference for text inputs.
        :type extraction_run: biblicus.models.ExtractionRunReference
        :return: Profiling output model.
        :rtype: pydantic.BaseModel
        """
        # Accept either an already-validated recipe model or a raw mapping.
        if isinstance(config, ProfilingRecipeConfig):
            recipe_config = config
        else:
            recipe_config = ProfilingRecipeConfig.model_validate(config)
        return _run_profiling(
            corpus=corpus,
            recipe_name=recipe_name,
            config=recipe_config,
            extraction_run=extraction_run,
        )
78
+
79
+
80
def _run_profiling(
    *,
    corpus: Corpus,
    recipe_name: str,
    config: ProfilingRecipeConfig,
    extraction_run: ExtractionRunReference,
) -> ProfilingOutput:
    """
    Execute the profiling pipeline end to end and persist its artifacts.

    Builds a recipe manifest, derives a deterministic run id, summarizes raw
    items and extracted-text coverage, writes ``manifest.json`` and
    ``output.json`` into the run directory, and returns the output bundle.

    :param corpus: Corpus to analyze.
    :type corpus: Corpus
    :param recipe_name: Human-readable recipe name.
    :type recipe_name: str
    :param config: Validated profiling recipe configuration.
    :type config: ProfilingRecipeConfig
    :param extraction_run: Extraction run reference for text inputs.
    :type extraction_run: ExtractionRunReference
    :return: Profiling output bundle (also written to disk).
    :rtype: ProfilingOutput
    """
    recipe = _create_recipe_manifest(name=recipe_name, config=config)
    catalog = corpus.load_catalog()
    # Run id is derived from recipe + extraction run + catalog timestamp, so
    # re-running on unchanged inputs maps to the same run directory.
    run_id = _analysis_run_id(
        recipe_id=recipe.recipe_id,
        extraction_run=extraction_run,
        catalog_generated_at=catalog.generated_at,
    )
    run_manifest = AnalysisRunManifest(
        run_id=run_id,
        recipe=recipe,
        corpus_uri=catalog.corpus_uri,
        catalog_generated_at=catalog.generated_at,
        created_at=utc_now_iso(),
        input=AnalysisRunInput(extraction_run=extraction_run),
        artifact_paths=[],
        stats=[],
    ) if False else AnalysisRunManifest(
        run_id=run_id,
        recipe=recipe,
        corpus_uri=catalog.corpus_uri,
        catalog_generated_at=catalog.generated_at,
        created_at=utc_now_iso(),
        input=AnalysisRunInput(extraction_run=extraction_run),
        artifact_paths=[],
        stats={},
    )
    run_dir = corpus.analysis_run_dir(analysis_id=ProfilingBackend.analysis_id, run_id=run_id)
    output_path = run_dir / "output.json"
    run_dir.mkdir(parents=True, exist_ok=True)

    ordered_items = _ordered_catalog_items(catalog.items, catalog.order)
    raw_report = _build_raw_items_report(items=ordered_items, config=config)
    extracted_report = _build_extracted_text_report(
        corpus=corpus,
        extraction_run=extraction_run,
        config=config,
    )

    report = ProfilingReport(
        raw_items=raw_report,
        extracted_text=extracted_report,
        warnings=[],
        errors=[],
    )

    run_stats = {
        "raw_items": raw_report.total_items,
        "extracted_nonempty_items": extracted_report.extracted_nonempty_items,
        "extracted_missing_items": extracted_report.extracted_missing_items,
    }
    # Manifest is finalized (artifact paths + stats) before being written, so
    # the on-disk manifest always references the output file written below.
    run_manifest = run_manifest.model_copy(
        update={"artifact_paths": ["output.json"], "stats": run_stats}
    )
    _write_analysis_run_manifest(run_dir=run_dir, manifest=run_manifest)

    output = ProfilingOutput(
        analysis_id=ProfilingBackend.analysis_id,
        generated_at=utc_now_iso(),
        run=run_manifest,
        report=report,
    )
    _write_profiling_output(path=output_path, output=output)
    return output
141
+
142
+
143
def _create_recipe_manifest(*, name: str, config: ProfilingRecipeConfig) -> AnalysisRecipeManifest:
    """Build a recipe manifest whose id is a hash of the canonical recipe payload."""
    config_dump = config.model_dump()
    # Canonical JSON (sorted keys) keeps the recipe id stable across runs.
    canonical_payload = json.dumps(
        {
            "analysis_id": ProfilingBackend.analysis_id,
            "name": name,
            "config": config_dump,
        },
        sort_keys=True,
    )
    return AnalysisRecipeManifest(
        recipe_id=hash_text(canonical_payload),
        analysis_id=ProfilingBackend.analysis_id,
        name=name,
        created_at=utc_now_iso(),
        config=config_dump,
    )
160
+
161
+
162
def _analysis_run_id(
    *, recipe_id: str, extraction_run: ExtractionRunReference, catalog_generated_at: str
) -> str:
    """Derive a deterministic run id from recipe, extraction run, and catalog timestamp."""
    seed_parts = (recipe_id, extraction_run.as_string(), catalog_generated_at)
    return hash_text(":".join(seed_parts))
167
+
168
+
169
+ def _ordered_catalog_items(
170
+ items: Dict[str, CatalogItem],
171
+ order: Sequence[str],
172
+ ) -> List[CatalogItem]:
173
+ ordered: List[CatalogItem] = []
174
+ seen = set()
175
+ for item_id in order:
176
+ item = items.get(item_id)
177
+ if item is None:
178
+ continue
179
+ ordered.append(item)
180
+ seen.add(item_id)
181
+ for item_id in sorted(items):
182
+ if item_id in seen:
183
+ continue
184
+ ordered.append(items[item_id])
185
+ return ordered
186
+
187
+
188
def _build_raw_items_report(
    *, items: Sequence[CatalogItem], config: ProfilingRecipeConfig
) -> ProfilingRawItemsReport:
    """Summarize media types, byte-size distribution, and tag coverage for raw items."""
    media_type_counts: Dict[str, int] = {}
    for entry in items:
        media_type_counts[entry.media_type] = media_type_counts.get(entry.media_type, 0) + 1

    # Size distribution honors the optional sampling limit from the recipe.
    sampled_sizes = [entry.bytes for entry in _apply_sample(items, config.sample_size)]
    return ProfilingRawItemsReport(
        total_items=len(items),
        media_type_counts=media_type_counts,
        bytes_distribution=_build_distribution(sampled_sizes, config.percentiles),
        tags=_build_tag_report(items=items, config=config),
    )
205
+
206
+
207
def _build_tag_report(
    *, items: Sequence[CatalogItem], config: ProfilingRecipeConfig
) -> ProfilingTagReport:
    """Count tag usage across items, optionally restricted to the configured filters."""
    active_filters = config.tag_filters
    allowed = set(active_filters) if active_filters is not None else None
    counts: Dict[str, int] = {}
    items_with_tags = 0

    for entry in items:
        # With filters active, an item only counts as tagged if a filtered tag remains.
        matching = [tag for tag in entry.tags if allowed is None or tag in allowed]
        if not matching:
            continue
        items_with_tags += 1
        for tag in matching:
            counts[tag] = counts.get(tag, 0) + 1

    # Rank by descending count, ties broken alphabetically, capped at top_tag_count.
    ranked = sorted(counts.items(), key=lambda pair: (-pair[1], pair[0]))
    ranked = ranked[: config.top_tag_count]
    return ProfilingTagReport(
        tagged_items=items_with_tags,
        untagged_items=len(items) - items_with_tags,
        total_unique_tags=len(counts),
        top_tags=[ProfilingTagCount(tag=name, count=total) for name, total in ranked],
        tag_filters=active_filters,
    )
234
+
235
+
236
def _build_extracted_text_report(
    *,
    corpus: Corpus,
    extraction_run: ExtractionRunReference,
    config: ProfilingRecipeConfig,
) -> ProfilingExtractedTextReport:
    """
    Summarize extracted-text coverage for one extraction run.

    Classifies every item in the run manifest as missing (not in "extracted"
    status or without a final text artifact), empty (blank after stripping, or
    shorter than ``config.min_text_characters``), or non-empty, and builds a
    character-length distribution over the non-empty texts.

    :param corpus: Corpus that owns the extraction run artifacts.
    :type corpus: Corpus
    :param extraction_run: Reference identifying the extraction run to read.
    :type extraction_run: ExtractionRunReference
    :param config: Profiling recipe configuration.
    :type config: ProfilingRecipeConfig
    :return: Extracted text coverage report.
    :rtype: ProfilingExtractedTextReport
    """
    manifest = corpus.load_extraction_run_manifest(
        extractor_id=extraction_run.extractor_id,
        run_id=extraction_run.run_id,
    )
    nonempty_items = 0
    empty_items = 0
    missing_items = 0
    text_lengths: List[int] = []
    text_dir = corpus.extraction_run_dir(
        extractor_id=extraction_run.extractor_id,
        run_id=extraction_run.run_id,
    )

    for item_result in manifest.items:
        if item_result.status != "extracted" or item_result.final_text_relpath is None:
            missing_items += 1
            continue
        text_path = text_dir / item_result.final_text_relpath
        text_value = text_path.read_text(encoding="utf-8")
        stripped = text_value.strip()
        if not stripped:
            empty_items += 1
            continue
        # Texts below the configured minimum length are counted as empty, not dropped.
        if config.min_text_characters is not None and len(stripped) < config.min_text_characters:
            empty_items += 1
            continue
        nonempty_items += 1
        text_lengths.append(len(stripped))

    # The length distribution honors the same optional sampling as raw item sizes.
    sampled_lengths = _apply_sample(text_lengths, config.sample_size)
    characters_distribution = _build_distribution(sampled_lengths, config.percentiles)
    return ProfilingExtractedTextReport(
        source_items=len(manifest.items),
        extracted_nonempty_items=nonempty_items,
        extracted_empty_items=empty_items,
        extracted_missing_items=missing_items,
        characters_distribution=characters_distribution,
    )
280
+
281
+
282
+ def _apply_sample(values: Sequence, sample_size: int | None) -> List:
283
+ if sample_size is None:
284
+ return list(values)
285
+ return list(values[:sample_size])
286
+
287
+
288
def _build_distribution(
    values: Sequence[int], percentiles: Iterable[int]
) -> ProfilingDistributionReport:
    """Summarize values with count/min/max/mean and nearest-rank percentile entries."""
    ascending = sorted(values)
    total = len(ascending)
    if total == 0:
        # Zeroed aggregates, but still one percentile entry per requested rank.
        return ProfilingDistributionReport(
            count=0,
            min_value=0.0,
            max_value=0.0,
            mean_value=0.0,
            percentiles=[
                ProfilingPercentileValue(percentile=rank, value=0.0) for rank in percentiles
            ],
        )
    return ProfilingDistributionReport(
        count=total,
        min_value=float(ascending[0]),
        max_value=float(ascending[-1]),
        mean_value=float(sum(ascending)) / total,
        percentiles=[
            ProfilingPercentileValue(
                percentile=rank,
                value=float(_percentile_value(ascending, rank)),
            )
            for rank in percentiles
        ],
    )
321
+
322
+
323
+ def _percentile_value(sorted_values: Sequence[int], percentile: int) -> int:
324
+ if not sorted_values:
325
+ return 0
326
+ index = max(0, math.ceil((percentile / 100) * len(sorted_values)) - 1)
327
+ index = min(index, len(sorted_values) - 1)
328
+ return int(sorted_values[index])
329
+
330
+
331
def _write_analysis_run_manifest(*, run_dir: Path, manifest: AnalysisRunManifest) -> None:
    """Persist the run manifest as pretty-printed JSON with a trailing newline."""
    serialized = manifest.model_dump_json(indent=2) + "\n"
    (run_dir / "manifest.json").write_text(serialized, encoding="utf-8")
334
+
335
+
336
def _write_profiling_output(*, path: Path, output: ProfilingOutput) -> None:
    """Persist the profiling output bundle as pretty-printed JSON with a trailing newline."""
    payload = output.model_dump_json(indent=2) + "\n"
    path.write_text(payload, encoding="utf-8")
@@ -452,7 +452,7 @@ def _run_bertopic(
452
452
  except ImportError as import_error:
453
453
  raise ValueError(
454
454
  "Vectorizer configuration requires scikit-learn. "
455
- "Install with pip install \"biblicus[topic-modeling]\"."
455
+ 'Install with pip install "biblicus[topic-modeling]".'
456
456
  ) from import_error
457
457
  bertopic_kwargs["vectorizer_model"] = CountVectorizer(
458
458
  ngram_range=tuple(config.vectorizer.ngram_range),
@@ -504,13 +504,10 @@ def _group_documents_by_topic(
504
504
  return grouped
505
505
 
506
506
 
507
def _resolve_topic_keywords(*, topic_model: Any, topic_id: int) -> List[TopicModelingKeyword]:
    """Materialize keyword/score pairs from the topic model for one topic id."""
    # get_topic may return None for unknown topics; treat that as no keywords.
    entries = topic_model.get_topic(topic_id) or []
    keywords: List[TopicModelingKeyword] = []
    for entry in entries:
        keywords.append(TopicModelingKeyword(keyword=str(entry[0]), score=float(entry[1])))
    return keywords
515
512
 
516
513