osmosis-ai 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of osmosis-ai might be problematic.
- osmosis_ai/__init__.py +13 -4
- osmosis_ai/cli.py +50 -0
- osmosis_ai/cli_commands.py +181 -0
- osmosis_ai/cli_services/__init__.py +67 -0
- osmosis_ai/cli_services/config.py +407 -0
- osmosis_ai/cli_services/dataset.py +229 -0
- osmosis_ai/cli_services/engine.py +251 -0
- osmosis_ai/cli_services/errors.py +7 -0
- osmosis_ai/cli_services/reporting.py +307 -0
- osmosis_ai/cli_services/session.py +174 -0
- osmosis_ai/cli_services/shared.py +209 -0
- osmosis_ai/consts.py +1 -1
- osmosis_ai/providers/__init__.py +36 -0
- osmosis_ai/providers/anthropic_provider.py +85 -0
- osmosis_ai/providers/base.py +60 -0
- osmosis_ai/providers/gemini_provider.py +314 -0
- osmosis_ai/providers/openai_family.py +607 -0
- osmosis_ai/providers/shared.py +92 -0
- osmosis_ai/rubric_eval.py +498 -0
- osmosis_ai/rubric_types.py +49 -0
- osmosis_ai/utils.py +392 -5
- osmosis_ai-0.2.3.dist-info/METADATA +303 -0
- osmosis_ai-0.2.3.dist-info/RECORD +27 -0
- osmosis_ai-0.2.3.dist-info/entry_points.txt +4 -0
- osmosis_ai-0.2.1.dist-info/METADATA +0 -143
- osmosis_ai-0.2.1.dist-info/RECORD +0 -8
- {osmosis_ai-0.2.1.dist-info → osmosis_ai-0.2.3.dist-info}/WHEEL +0 -0
- {osmosis_ai-0.2.1.dist-info → osmosis_ai-0.2.3.dist-info}/licenses/LICENSE +0 -0
- {osmosis_ai-0.2.1.dist-info → osmosis_ai-0.2.3.dist-info}/top_level.txt +0 -0
osmosis_ai/cli_services/config.py (new file)
@@ -0,0 +1,407 @@
+from __future__ import annotations
+
+import copy
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Optional, Sequence
+
+import yaml
+from yaml.representer import SafeRepresenter
+
+from .errors import CLIError
+from .shared import coerce_optional_float
+
+
+@dataclass(frozen=True)
+class ParsedItem:
+    label: Optional[str]
+    payload: Any
+
+
+@dataclass(frozen=True)
+class RubricConfig:
+    rubric_id: str
+    rubric_text: str
+    model_info: dict[str, Any]
+    score_min: Optional[float]
+    score_max: Optional[float]
+    system_message: Optional[str]
+    extra_info: Optional[dict[str, Any]]
+    original_input: Optional[str]
+    ground_truth: Optional[str]
+    source_label: str
+
+
+@dataclass(frozen=True)
+class RubricSuite:
+    source_path: Path
+    version: Optional[int]
+    configs: dict[str, RubricConfig]
+
+    def get(self, rubric_id: str) -> RubricConfig:
+        if rubric_id not in self.configs:
+            available = ", ".join(self.available_ids()) or "none"
+            raise CLIError(
+                f"Rubric '{rubric_id}' not found in '{self.source_path}'. Available IDs: {available}"
+            )
+        return self.configs[rubric_id]
+
+    def available_ids(self) -> list[str]:
+        return sorted(self.configs)
+
+
+@dataclass(frozen=True)
+class RubricConfigDocumentResult:
+    configs: dict[str, RubricConfig]
+    items: list[ParsedItem]
+
+
+class RubricConfigDocumentSchema:
+    """Base interface for schema-specific rubric config parsing."""
+
+    version: Optional[int] = None
+
+    def parse_document(
+        self,
+        document: Any,
+        *,
+        path: Path,
+        doc_index: int,
+        strict: bool,
+    ) -> RubricConfigDocumentResult:
+        raise NotImplementedError
+
+
+class LegacyRubricConfigSchema(RubricConfigDocumentSchema):
+    """Schema handling documents without an explicit version."""
+
+    version = None
+
+    def parse_document(
+        self,
+        document: Any,
+        *,
+        path: Path,
+        doc_index: int,
+        strict: bool,
+    ) -> RubricConfigDocumentResult:
+        defaults = _extract_config_defaults(document, path, doc_index)
+        entries = _extract_rubric_items(document, context=None, doc_index=doc_index)
+        return _build_document_configs(entries, defaults, path=path, doc_index=doc_index, strict=strict)
+
+
+class Version1RubricConfigSchema(LegacyRubricConfigSchema):
+    """Schema for version 1 documents (currently aligned with legacy layout)."""
+
+    version = 1
+
+
+class RubricConfigParser:
+    """Parses rubric configuration files and produces typed suites."""
+
+    def __init__(self, *, schemas: Optional[dict[Optional[int], RubricConfigDocumentSchema]] = None):
+        self._schemas = schemas or {
+            None: LegacyRubricConfigSchema(),
+            1: Version1RubricConfigSchema(),
+        }
+        if None not in self._schemas:
+            raise ValueError("At least one default schema (key=None) must be provided.")
+
+    def parse(self, path: Path, *, strict: bool = True) -> tuple[RubricSuite, list[ParsedItem]]:
+        documents = _load_yaml_documents(path)
+        configs: dict[str, RubricConfig] = {}
+        parsed_items: list[ParsedItem] = []
+        detected_version: Optional[int] = None
+        document_indices = []
+
+        for doc_index, document in enumerate(documents):
+            if document:
+                document_indices.append(doc_index)
+            if not document:
+                continue
+
+            doc_version = self._coerce_optional_version(document, path, doc_index)
+            if doc_version is not None:
+                if detected_version is None:
+                    detected_version = doc_version
+                elif detected_version != doc_version:
+                    raise CLIError(
+                        f"Rubric config '{path}' mixes different version numbers across documents."
+                    )
+
+        schema = self._select_schema(detected_version)
+
+        for doc_index in document_indices:
+            document = documents[doc_index]
+            if not document:
+                continue
+
+            result = schema.parse_document(
+                document,
+                path=path,
+                doc_index=doc_index,
+                strict=strict,
+            )
+            parsed_items.extend(result.items)
+            for rubric_id, config in result.configs.items():
+                if rubric_id in configs:
+                    raise CLIError(f"Duplicate rubric id '{rubric_id}' detected in '{path}'.")
+                configs[rubric_id] = config
+
+        if strict and not configs:
+            raise CLIError(f"No rubric entries found in '{path}'.")
+
+        suite = RubricSuite(source_path=path, version=detected_version, configs=configs)
+        return suite, parsed_items
+
+    def _select_schema(self, version: Optional[int]) -> RubricConfigDocumentSchema:
+        if version in self._schemas:
+            return self._schemas[version]
+        if version is None:
+            return self._schemas[None]
+        raise CLIError(f"Unsupported rubric config version '{version}'.")
+
+    @staticmethod
+    def _coerce_optional_version(document: Any, path: Path, doc_index: int) -> Optional[int]:
+        if not isinstance(document, dict):
+            return None
+        version_value = document.get("version")
+        if version_value is None:
+            return None
+        if isinstance(version_value, int):
+            if version_value < 0:
+                raise CLIError(
+                    f"Version number in '{path}' document {doc_index} must be non-negative."
+                )
+            return version_value
+        raise CLIError(
+            f"Version field in '{path}' document {doc_index} must be an integer."
+        )
+
+
+def _build_document_configs(
+    entries: Sequence[ParsedItem],
+    defaults: dict[str, Any],
+    *,
+    path: Path,
+    doc_index: int,
+    strict: bool,
+) -> RubricConfigDocumentResult:
+    configs: dict[str, RubricConfig] = {}
+    parsed_items: list[ParsedItem] = []
+
+    for item in entries:
+        payload = item.payload
+        parsed_items.append(ParsedItem(label=item.label, payload=payload))
+        if not isinstance(payload, dict):
+            continue
+
+        rubric_key_raw = payload.get("id")
+        if not isinstance(rubric_key_raw, str) or not rubric_key_raw.strip():
+            if strict:
+                raise CLIError(
+                    f"Rubric entry in '{path}' (document {doc_index}) is missing a non-empty 'id'."
+                )
+            continue
+        rubric_key = rubric_key_raw.strip()
+        if rubric_key in configs:
+            raise CLIError(f"Duplicate rubric id '{rubric_key}' detected in '{path}'.")
+
+        rubric_text = payload.get("rubric")
+        if not isinstance(rubric_text, str) or not rubric_text.strip():
+            if strict:
+                raise CLIError(
+                    f"Rubric '{rubric_key}' in '{path}' must include a non-empty 'rubric' string."
+                )
+            continue
+
+        model_info = payload.get("model_info", defaults.get("model_info"))
+        if not isinstance(model_info, dict):
+            if strict:
+                raise CLIError(
+                    f"Rubric '{rubric_key}' in '{path}' must include a 'model_info' mapping."
+                )
+            continue
+
+        extra_info_value = payload.get("extra_info", defaults.get("extra_info"))
+        if extra_info_value is not None and not isinstance(extra_info_value, dict):
+            if strict:
+                raise CLIError(
+                    f"'extra_info' for rubric '{rubric_key}' in '{path}' must be a mapping."
+                )
+            continue
+
+        try:
+            score_min = coerce_optional_float(
+                payload.get("score_min", defaults.get("score_min")),
+                "score_min",
+                f"rubric '{rubric_key}' in {path}",
+            )
+            score_max = coerce_optional_float(
+                payload.get("score_max", defaults.get("score_max")),
+                "score_max",
+                f"rubric '{rubric_key}' in {path}",
+            )
+        except CLIError:
+            if strict:
+                raise
+            continue
+
+        system_message = payload.get("system_message", defaults.get("system_message"))
+        original_input = payload.get("original_input", defaults.get("original_input"))
+        ground_truth = payload.get("ground_truth", defaults.get("ground_truth"))
+
+        label = item.label or f"document[{doc_index}]"
+        source_label = f"{path}:{label}"
+
+        configs[rubric_key] = RubricConfig(
+            rubric_id=rubric_key,
+            rubric_text=rubric_text,
+            model_info=copy.deepcopy(model_info),
+            score_min=score_min,
+            score_max=score_max,
+            system_message=system_message if isinstance(system_message, str) else None,
+            extra_info=copy.deepcopy(extra_info_value) if isinstance(extra_info_value, dict) else None,
+            original_input=original_input if isinstance(original_input, str) else None,
+            ground_truth=ground_truth if isinstance(ground_truth, str) else None,
+            source_label=source_label,
+        )
+
+    return RubricConfigDocumentResult(configs=configs, items=parsed_items)
+
+
+def discover_rubric_config_path(config_arg: Optional[str], data_path: Path) -> Path:
+    if config_arg:
+        candidate = Path(config_arg).expanduser()
+        if not candidate.exists():
+            raise CLIError(f"Rubric config path '{candidate}' does not exist.")
+        if candidate.is_dir():
+            raise CLIError(f"Rubric config path '{candidate}' is a directory.")
+        return candidate
+
+    candidates: list[Path] = []
+    candidates.append(data_path.parent / "rubric_configs.yaml")
+    candidates.append(Path.cwd() / "rubric_configs.yaml")
+    candidates.append(Path.cwd() / "examples" / "rubric_configs.yaml")
+
+    checked: list[Path] = []
+    for candidate in dict.fromkeys(candidates):
+        checked.append(candidate)
+        if candidate.exists() and candidate.is_file():
+            return candidate
+
+    searched = ", ".join(str(path) for path in checked)
+    raise CLIError(
+        "Unable to locate a rubric config file. Provide --config explicitly. "
+        f"Paths checked: {searched}"
+    )
+
+
+def load_rubric_configs(path: Path) -> list[ParsedItem]:
+    parser = RubricConfigParser()
+    _, items = parser.parse(path, strict=False)
+    return items
+
+
+def load_rubric_suite(path: Path) -> RubricSuite:
+    parser = RubricConfigParser()
+    suite, _ = parser.parse(path)
+    return suite
+
+
+def render_yaml_items(items: Sequence[ParsedItem], label: str) -> str:
+    blocks: list[str] = []
+    total = len(items)
+
+    for index, item in enumerate(items, start=1):
+        header = f"{label} #{index}"
+        if item.label:
+            header += f" ({item.label})"
+        dumped = yaml.dump(
+            item.payload,
+            Dumper=_LiteralSafeDumper,
+            sort_keys=False,
+            indent=2,
+            allow_unicode=True,
+        ).rstrip()
+
+        snippet = [header, dumped]
+        if index != total:
+            snippet.append("")
+        blocks.append("\n".join(snippet))
+
+    return "\n".join(blocks)
+
+
+def _load_yaml_documents(path: Path) -> list[Any]:
+    try:
+        with path.open("r", encoding="utf-8") as fh:
+            return list(yaml.safe_load_all(fh))
+    except yaml.YAMLError as exc:
+        raise CLIError(f"Failed to parse YAML in '{path}': {exc}") from exc
+    except OSError as exc:
+        raise CLIError(f"Unable to read rubric config '{path}': {exc}") from exc
+
+
+def _extract_config_defaults(document: Any, path: Path, doc_index: int) -> dict[str, Any]:
+    if not isinstance(document, dict):
+        return {
+            "model_info": None,
+            "extra_info": None,
+            "score_min": None,
+            "score_max": None,
+            "system_message": None,
+            "original_input": None,
+            "ground_truth": None,
+        }
+
+    source = f"document[{doc_index}] in {path}"
+
+    defaults: dict[str, Any] = {}
+    defaults["model_info"] = document.get("default_model_info")
+    defaults["extra_info"] = document.get("default_extra_info")
+    defaults["score_min"] = coerce_optional_float(
+        document.get("default_score_min"), "default_score_min", source
+    )
+    defaults["score_max"] = coerce_optional_float(
+        document.get("default_score_max"), "default_score_max", source
+    )
+    defaults["system_message"] = document.get("default_system_message")
+    defaults["original_input"] = document.get("default_original_input")
+    defaults["ground_truth"] = document.get("default_ground_truth")
+    return defaults
+
+
+def _extract_rubric_items(node: Any, context: Optional[str], doc_index: int) -> list[ParsedItem]:
+    items: list[ParsedItem] = []
+
+    if node is None:
+        return items
+
+    if isinstance(node, dict):
+        if "rubric" in node and isinstance(node["rubric"], str):
+            label = context or f"document[{doc_index}]"
+            items.append(ParsedItem(label=label, payload=node))
+        else:
+            for key, value in node.items():
+                next_context = str(key) if isinstance(key, str) else context
+                items.extend(_extract_rubric_items(value, context=next_context, doc_index=doc_index))
+    elif isinstance(node, list):
+        for index, value in enumerate(node):
+            idx_context = f"{context}[{index}]" if context else None
+            items.extend(_extract_rubric_items(value, context=idx_context, doc_index=doc_index))
+
+    return items
+
+
+class _LiteralSafeDumper(yaml.SafeDumper):
+    """YAML dumper that preserves multiline strings with literal blocks."""
+
+
+def _represent_str(dumper: yaml.Dumper, data: str):
+    if "\n" in data:
+        return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
+    return SafeRepresenter.represent_str(dumper, data)
+
+
+_LiteralSafeDumper.add_representer(str, _represent_str)
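
The module above loads multi-document YAML rubric configs: per-document defaults (default_model_info, default_score_min, default_score_max, default_system_message, and so on) are merged into each rubric entry, and every entry must carry a non-empty 'id', a 'rubric' string, and a 'model_info' mapping. The snippet below is a minimal usage sketch, not part of the release; the YAML contents and the provider/model fields inside model_info are illustrative only, and it assumes coerce_optional_float (defined in cli_services/shared.py, not shown in this diff) passes numeric values through unchanged. Only load_rubric_suite, RubricSuite.get, and RubricSuite.available_ids come from the code above.

from pathlib import Path
from tempfile import TemporaryDirectory

from osmosis_ai.cli_services.config import load_rubric_suite

# Hypothetical rubric config; field values are illustrative only.
EXAMPLE_CONFIG = """\
version: 1
default_score_min: 0.0
default_score_max: 1.0
rubrics:
  - id: helpfulness
    rubric: |
      Award 1.0 when the assistant reply fully addresses the user's request,
      0.0 when it does not address it at all.
    model_info:
      provider: openai      # assumed shape; config.py only requires a mapping
      model: gpt-4o-mini
"""

with TemporaryDirectory() as tmp:
    config_path = Path(tmp) / "rubric_configs.yaml"
    config_path.write_text(EXAMPLE_CONFIG, encoding="utf-8")

    suite = load_rubric_suite(config_path)      # strict parse into a RubricSuite
    print(suite.available_ids())                # ['helpfulness']
    config = suite.get("helpfulness")
    print(config.score_min, config.score_max)   # document defaults applied: 0.0 1.0

When no explicit --config is given, discover_rubric_config_path falls back to rubric_configs.yaml next to the dataset file, then in the current directory, then under ./examples/, in that order.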

osmosis_ai/cli_services/dataset.py (new file)
@@ -0,0 +1,229 @@
+from __future__ import annotations
+
+import copy
+import json
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Optional, Sequence
+
+from .errors import CLIError
+from .shared import coerce_optional_float, gather_text_fragments
+
+
+@dataclass(frozen=True)
+class ConversationMessage:
+    """Normalized conversation message with preserved raw payload fields."""
+
+    role: str
+    content: Any
+    metadata: dict[str, Any]
+
+    def to_payload(self) -> dict[str, Any]:
+        payload: dict[str, Any] = copy.deepcopy(self.metadata)
+        payload["role"] = self.role
+        if self.content is None:
+            payload.pop("content", None)
+        else:
+            payload["content"] = copy.deepcopy(self.content)
+        return payload
+
+    def text_fragments(self) -> list[str]:
+        fragments: list[str] = []
+        seen: set[int] = set()
+        gather_text_fragments(self.content, fragments, allow_free_strings=True, seen=seen)
+        for value in self.metadata.values():
+            gather_text_fragments(value, fragments, seen=seen)
+        return fragments
+
+    @classmethod
+    def from_raw(cls, raw: dict[str, Any], *, source_label: str, index: int) -> "ConversationMessage":
+        role_value = raw.get("role")
+        if not isinstance(role_value, str) or not role_value.strip():
+            raise CLIError(
+                f"Message {index} in {source_label} must include a non-empty string 'role'."
+            )
+        content_value = copy.deepcopy(raw.get("content"))
+        metadata: dict[str, Any] = {}
+        for key, value in raw.items():
+            if key in {"role", "content"}:
+                continue
+            metadata[str(key)] = copy.deepcopy(value)
+        return cls(role=role_value.strip().lower(), content=content_value, metadata=metadata)
+
+
+@dataclass(frozen=True)
+class DatasetRecord:
+    payload: dict[str, Any]
+    rubric_id: str
+    conversation_id: Optional[str]
+    record_id: Optional[str]
+    messages: tuple[ConversationMessage, ...]
+    ground_truth: Optional[str]
+    system_message: Optional[str]
+    original_input: Optional[str]
+    metadata: Optional[dict[str, Any]]
+    extra_info: Optional[dict[str, Any]]
+    score_min: Optional[float]
+    score_max: Optional[float]
+
+    def message_payloads(self) -> list[dict[str, Any]]:
+        """Return messages as provider-ready payloads."""
+        return [message.to_payload() for message in self.messages]
+
+    def merged_extra_info(self, config_extra: Optional[dict[str, Any]]) -> Optional[dict[str, Any]]:
+        merged: dict[str, Any] = {}
+        if isinstance(config_extra, dict):
+            merged.update(copy.deepcopy(config_extra))
+        if isinstance(self.extra_info, dict):
+            merged.update(copy.deepcopy(self.extra_info))
+        if isinstance(self.metadata, dict) and self.metadata:
+            merged.setdefault("dataset_metadata", copy.deepcopy(self.metadata))
+        return merged or None
+
+    def assistant_preview(self, *, max_length: int = 140) -> Optional[str]:
+        for message in reversed(self.messages):
+            if message.role != "assistant":
+                continue
+            fragments = message.text_fragments()
+            if not fragments:
+                continue
+            preview = " ".join(" ".join(fragments).split())
+            if not preview:
+                continue
+            if len(preview) > max_length:
+                preview = preview[: max_length - 3].rstrip() + "..."
+            return preview
+        return None
+
+    def conversation_label(self, fallback_index: int) -> str:
+        if isinstance(self.conversation_id, str) and self.conversation_id.strip():
+            return self.conversation_id.strip()
+        return f"record[{fallback_index}]"
+
+    def record_identifier(self, conversation_label: str) -> str:
+        if isinstance(self.record_id, str) and self.record_id.strip():
+            return self.record_id.strip()
+        raw_id = self.payload.get("id")
+        if isinstance(raw_id, str) and raw_id.strip():
+            return raw_id.strip()
+        if raw_id is not None:
+            return str(raw_id)
+        return conversation_label
+
+
+class DatasetLoader:
+    """Loads dataset records from JSONL files."""
+
+    def load(self, path: Path) -> list[DatasetRecord]:
+        records: list[DatasetRecord] = []
+        with path.open("r", encoding="utf-8") as fh:
+            for line_number, raw_line in enumerate(fh, start=1):
+                stripped = raw_line.strip()
+                if not stripped:
+                    continue
+                try:
+                    payload = json.loads(stripped)
+                except json.JSONDecodeError as exc:
+                    raise CLIError(
+                        f"Invalid JSON on line {line_number} of '{path}': {exc.msg}"
+                    ) from exc
+                if not isinstance(payload, dict):
+                    raise CLIError(
+                        f"Expected JSON object on line {line_number} of '{path}'."
+                    )
+
+                records.append(self._create_record(payload))
+
+        if not records:
+            raise CLIError(f"No JSON records found in '{path}'.")
+
+        return records
+
+    @staticmethod
+    def _create_record(payload: dict[str, Any]) -> DatasetRecord:
+        rubric_id = payload.get("rubric_id")
+        rubric_id_str = str(rubric_id).strip() if isinstance(rubric_id, str) else ""
+
+        conversation_id_raw = payload.get("conversation_id")
+        conversation_id = None
+        if isinstance(conversation_id_raw, str) and conversation_id_raw.strip():
+            conversation_id = conversation_id_raw.strip()
+
+        record_id_raw = payload.get("id")
+        record_id = str(record_id_raw).strip() if isinstance(record_id_raw, str) else None
+
+        score_min = coerce_optional_float(
+            payload.get("score_min"), "score_min", f"record '{conversation_id or rubric_id or '<record>'}'"
+        )
+        score_max = coerce_optional_float(
+            payload.get("score_max"), "score_max", f"record '{conversation_id or rubric_id or '<record>'}'"
+        )
+
+        metadata = payload.get("metadata") if isinstance(payload.get("metadata"), dict) else None
+        extra_info = payload.get("extra_info") if isinstance(payload.get("extra_info"), dict) else None
+        record_label = conversation_id or record_id or rubric_id_str or "<record>"
+        messages = _parse_messages(payload.get("messages"), source_label=record_label)
+
+        return DatasetRecord(
+            payload=payload,
+            rubric_id=rubric_id_str,
+            conversation_id=conversation_id,
+            record_id=record_id,
+            messages=messages,
+            ground_truth=payload.get("ground_truth") if isinstance(payload.get("ground_truth"), str) else None,
+            system_message=payload.get("system_message") if isinstance(payload.get("system_message"), str) else None,
+            original_input=payload.get("original_input") if isinstance(payload.get("original_input"), str) else None,
+            metadata=metadata,
+            extra_info=extra_info,
+            score_min=score_min,
+            score_max=score_max,
+        )
+
+
+def load_jsonl_records(path: Path) -> list[dict[str, Any]]:
+    records: list[dict[str, Any]] = []
+    with path.open("r", encoding="utf-8") as fh:
+        for line_number, raw_line in enumerate(fh, start=1):
+            stripped = raw_line.strip()
+            if not stripped:
+                continue
+            try:
+                record = json.loads(stripped)
+            except json.JSONDecodeError as exc:
+                raise CLIError(f"Invalid JSON on line {line_number} of '{path}': {exc.msg}") from exc
+            if not isinstance(record, dict):
+                raise CLIError(f"Expected JSON object on line {line_number} of '{path}'.")
+            records.append(record)
+
+    if not records:
+        raise CLIError(f"No JSON records found in '{path}'.")
+
+    return records
+
+
+def render_json_records(records: Sequence[dict[str, Any]]) -> str:
+    segments: list[str] = []
+    total = len(records)
+
+    for index, record in enumerate(records, start=1):
+        body = json.dumps(record, indent=2, ensure_ascii=False)
+        snippet = [f"JSONL record #{index}", body]
+        if index != total:
+            snippet.append("")
+        segments.append("\n".join(snippet))
+
+    return "\n".join(segments)
+
+
+def _parse_messages(messages: Any, *, source_label: str) -> tuple[ConversationMessage, ...]:
+    if not isinstance(messages, list) or not messages:
+        raise CLIError(f"Record '{source_label}' must include a non-empty 'messages' list.")
+
+    normalized: list[ConversationMessage] = []
+    for index, entry in enumerate(messages):
+        if not isinstance(entry, dict):
+            raise CLIError(
+                f"Message {index} in {source_label} must be an object, got {type(entry).__name__}."
+            )
+        normalized.append(ConversationMessage.from_raw(entry, source_label=source_label, index=index))
+    return tuple(normalized)
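
DatasetLoader expects one JSON object per line and requires only a non-empty 'messages' list per record; rubric_id, conversation_id, id, ground_truth, score bounds, metadata, and extra_info are optional and type-checked. The sketch below is illustrative, not part of the release: the record contents are invented, and it assumes gather_text_fragments (from cli_services/shared.py, not shown in this diff) collects plain string content and coerce_optional_float returns None for absent values.

import json
from pathlib import Path
from tempfile import TemporaryDirectory

from osmosis_ai.cli_services.dataset import DatasetLoader

# Hypothetical dataset record; only the 'messages' list is strictly required.
record = {
    "rubric_id": "helpfulness",
    "conversation_id": "conv-001",
    "ground_truth": "Paris",
    "messages": [
        {"role": "user", "content": "What is the capital of France?"},
        {"role": "assistant", "content": "The capital of France is Paris."},
    ],
}

with TemporaryDirectory() as tmp:
    data_path = Path(tmp) / "dataset.jsonl"
    data_path.write_text(json.dumps(record) + "\n", encoding="utf-8")

    records = DatasetLoader().load(data_path)
    first = records[0]
    print(first.conversation_label(0))   # conv-001
    print(first.assistant_preview())     # latest assistant text, trimmed to 140 chars
    print(first.message_payloads())      # provider-ready role/content dicts

Malformed lines surface as CLIError with the offending line number, so CLI callers can report dataset problems precisely.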