osmosis-ai 0.2.2__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osmosis_ai/cli.py +50 -0
- osmosis_ai/cli_commands.py +181 -0
- osmosis_ai/cli_services/__init__.py +67 -0
- osmosis_ai/cli_services/config.py +407 -0
- osmosis_ai/cli_services/dataset.py +229 -0
- osmosis_ai/cli_services/engine.py +251 -0
- osmosis_ai/cli_services/errors.py +7 -0
- osmosis_ai/cli_services/reporting.py +307 -0
- osmosis_ai/cli_services/session.py +174 -0
- osmosis_ai/cli_services/shared.py +209 -0
- osmosis_ai/providers/gemini_provider.py +73 -28
- osmosis_ai/rubric_eval.py +27 -66
- osmosis_ai/utils.py +0 -4
- {osmosis_ai-0.2.2.dist-info → osmosis_ai-0.2.3.dist-info}/METADATA +64 -2
- osmosis_ai-0.2.3.dist-info/RECORD +27 -0
- osmosis_ai-0.2.3.dist-info/entry_points.txt +4 -0
- osmosis_ai-0.2.2.dist-info/RECORD +0 -16
- {osmosis_ai-0.2.2.dist-info → osmosis_ai-0.2.3.dist-info}/WHEEL +0 -0
- {osmosis_ai-0.2.2.dist-info → osmosis_ai-0.2.3.dist-info}/licenses/LICENSE +0 -0
- {osmosis_ai-0.2.2.dist-info → osmosis_ai-0.2.3.dist-info}/top_level.txt +0 -0
osmosis_ai/providers/gemini_provider.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations

 from contextlib import contextmanager
+import inspect
 import time
 import warnings
 from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Tuple
@@ -167,6 +168,14 @@ def _seconds_to_millis(seconds: float) -> int:
     return max(int(round(seconds * 1000)), 1)


+def _supports_request_options(generate_content: Any) -> bool:
+    try:
+        signature = inspect.signature(generate_content)
+    except (TypeError, ValueError):
+        return False
+    return "request_options" in signature.parameters
+
+
 class GeminiProvider(RubricProvider):
     name = "gemini"

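The new `_supports_request_options` helper is a capability probe: it only inspects the callable's signature, so the provider can decide whether the installed `google-genai` SDK accepts a per-call `request_options` argument without making a network request. A minimal standalone sketch of the same technique, using hypothetical stand-in functions instead of the real SDK client:

```python
import inspect
from typing import Any


def supports_request_options(generate_content: Any) -> bool:
    # Builtins and some C-implemented callables expose no signature;
    # treat that case conservatively as "not supported", as the provider does.
    try:
        signature = inspect.signature(generate_content)
    except (TypeError, ValueError):
        return False
    return "request_options" in signature.parameters


# Hypothetical stand-ins for an older and a newer SDK method.
def old_generate_content(model, contents, config): ...
def new_generate_content(model, contents, config, request_options=None): ...


print(supports_request_options(old_generate_content))  # False
print(supports_request_options(new_generate_content))  # True
```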
@@ -188,11 +197,31 @@ class GeminiProvider(RubricProvider):
         retry_timeouts = _build_retry_timeouts(requested_timeout)
         max_timeout = max(retry_timeouts)

+        supports_request_options = False
+        shared_client: Any | None = None
+
         with _suppress_pydantic_any_warning():
-            [removed line; content not captured in this extract]
+            probe_client = genai.Client(
                 api_key=request.api_key,
                 http_options={"timeout": _seconds_to_millis(max_timeout)},
             )
+        try:
+            supports_request_options = _supports_request_options(probe_client.models.generate_content)
+        except Exception:
+            try:
+                probe_client.close()
+            except Exception:
+                pass
+            raise
+
+        if supports_request_options:
+            shared_client = probe_client
+        else:
+            try:
+                probe_client.close()
+            except Exception:
+                pass
+
         schema_definition = reward_schema_definition()
         gemini_schema = _json_schema_to_genai(schema_definition, genai_types)
         config = genai_types.GenerateContentConfig(
@@ -206,33 +235,49 @@ class GeminiProvider(RubricProvider):
         response: Any | None = None
         last_error: Exception | None = None

-        [27 removed lines (old 209-235); content not captured in this extract]
+        try:
+            for attempt_index, attempt_timeout in enumerate(retry_timeouts, start=1):
+                per_attempt_client: Any | None = None
+                http_timeout_ms = _seconds_to_millis(attempt_timeout)
+                try:
+                    call_kwargs = {
+                        "model": _normalize_gemini_model(request.model),
+                        "contents": combined_prompt,
+                        "config": config,
+                    }
+                    if supports_request_options and shared_client is not None:
+                        call_client = shared_client
+                        call_kwargs["request_options"] = {"timeout": http_timeout_ms}
+                    else:
+                        with _suppress_pydantic_any_warning():
+                            per_attempt_client = genai.Client(
+                                api_key=request.api_key,
+                                http_options={"timeout": http_timeout_ms},
+                            )
+                        call_client = per_attempt_client
+
+                    with _suppress_pydantic_any_warning():
+                        response = call_client.models.generate_content(**call_kwargs)
+                    break
+                except Exception as err:  # pragma: no cover - network failures depend on runtime
+                    last_error = err
+                    if attempt_index >= len(retry_timeouts):
+                        detail = str(err).strip() or "Gemini request failed."
+                        raise ProviderRequestError(self.name, request.model, detail) from err
+                    sleep_idx = min(attempt_index - 1, len(GEMINI_RETRY_SLEEP_SECONDS) - 1)
+                    time.sleep(GEMINI_RETRY_SLEEP_SECONDS[sleep_idx])
+                finally:
+                    if per_attempt_client is not None:
+                        try:
+                            per_attempt_client.close()
+                        except Exception:
+                            pass
+        finally:
+            if shared_client is not None:
+                try:
+                    shared_client.close()
+                except Exception:
+                    pass

         if response is None and last_error is not None:
             detail = str(last_error).strip() or "Gemini request failed."
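Taken together, the new request path probes once for `request_options` support, reuses a single shared client when it is available, and otherwise builds a fresh client per attempt so each try gets its own HTTP timeout; failed attempts sleep on a capped back-off schedule before retrying. A simplified, self-contained sketch of that escalating-timeout retry shape; the timeout and sleep values are invented placeholders for `_build_retry_timeouts` and `GEMINI_RETRY_SLEEP_SECONDS`, whose definitions are not part of this diff:

```python
import time
from typing import Callable, Optional, Sequence


def call_with_escalating_timeouts(
    send: Callable[[float], str],
    timeouts: Sequence[float] = (10.0, 30.0, 60.0),  # hypothetical per-attempt budgets
    sleeps: Sequence[float] = (1.0, 5.0),            # hypothetical back-off schedule
) -> str:
    last_error: Optional[Exception] = None
    for attempt_index, attempt_timeout in enumerate(timeouts, start=1):
        try:
            # Each attempt is given a progressively larger timeout budget.
            return send(attempt_timeout)
        except Exception as err:
            last_error = err
            if attempt_index >= len(timeouts):
                # Out of attempts: surface the final failure to the caller.
                raise RuntimeError("request failed on every attempt") from err
            # Clamp into the sleep schedule, mirroring the provider's sleep_idx logic.
            sleep_idx = min(attempt_index - 1, len(sleeps) - 1)
            time.sleep(sleeps[sleep_idx])
    raise RuntimeError("no attempts were made") from last_error
```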
osmosis_ai/rubric_eval.py
CHANGED
@@ -206,74 +206,13 @@ def _build_user_prompt(


 def _collect_text_from_message(message: Dict[str, Any]) -> str:
+    from .cli_services.shared import collect_text_fragments
+
     content = message.get("content")
     if not isinstance(content, list):
         return ""
-
-
-    def _append_text(value: str) -> None:
-        stripped = value.strip()
-        if stripped:
-            texts.append(stripped)
-
-    def _walk(node: Any) -> None:
-        if isinstance(node, str):
-            _append_text(node)
-            return
-
-        if isinstance(node, list):
-            for item in node:
-                _walk(item)
-            return
-
-        if isinstance(node, dict):
-            # Prioritise common OpenAI / tool shapes, only escalating if a prior key yielded no text.
-            for key in ("text", "value"):
-                if key not in node:
-                    continue
-                before_count = len(texts)
-                _walk(node[key])
-                if len(texts) > before_count:
-                    break
-            if node.get("type") == "tool_result" and "content" in node:
-                _walk(node["content"])
-            elif "content" in node:
-                _walk(node["content"])
-            # Additional fallbacks (e.g., message wrappers).
-            for key in ("message", "parts", "input_text", "output_text"):
-                if key in node:
-                    _walk(node[key])
-            # Inspect remaining nested structures without re-traversing handled keys.
-            handled = {
-                "text",
-                "value",
-                "content",
-                "message",
-                "parts",
-                "input_text",
-                "output_text",
-                "type",
-                "role",
-                "name",
-                "id",
-                "index",
-                "finish_reason",
-                "reason",
-                "tool_call_id",
-                "metadata",
-            }
-            for key, value in node.items():
-                if key in handled:
-                    continue
-                if isinstance(value, (list, dict)):
-                    _walk(value)
-                elif isinstance(value, str) and key.lower() in {"text", "value", "message"}:
-                    _append_text(value)
-
-    for block in content:
-        _walk(block)
-
-    return " ".join(texts)
+    fragments = collect_text_fragments(content, allow_free_strings=True)
+    return " ".join(fragments)


 def _extract_latest_text(messages: List[Dict[str, Any]], role: str) -> Optional[str]:
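The traversal logic removed here now lives in `osmosis_ai.cli_services.shared.collect_text_fragments`, which `_collect_text_from_message` calls with `allow_free_strings=True` and joins with spaces. A hedged usage sketch, assuming an OpenAI-style content list of text parts (the full set of part shapes the shared helper accepts is not visible in this diff):

```python
from osmosis_ai.cli_services.shared import collect_text_fragments

# A chat message whose "content" is a list of structured parts.
content = [
    {"type": "text", "text": "The refund was issued on Friday."},
    {"type": "text", "text": "Let me know if anything else is needed."},
]

# Same shape of call as the refactored _collect_text_from_message.
fragments = collect_text_fragments(content, allow_free_strings=True)
print(" ".join(fragments))
```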
@@ -386,6 +325,22 @@ def _resolve_api_key(provider: str, model_info: ModelInfo) -> str:
     return api_key


+def ensure_api_key_available(model_info: ModelInfo) -> None:
+    """
+    Validate that the provider specified in `model_info` has an accessible API key.
+
+    Raises:
+        MissingAPIKeyError: When the lookup fails or the environment variable is unset.
+        TypeError: When `model_info` is missing required fields.
+    """
+    provider_raw = model_info.get("provider")
+    if not isinstance(provider_raw, str) or not provider_raw.strip():
+        raise TypeError("'model_info' must include a 'provider' string")
+
+    provider = provider_raw.strip().lower()
+    _resolve_api_key(provider, model_info)
+
+
 def _run_reward_rubric(
     provider_name: str,
     provider_impl: RubricProvider,
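`ensure_api_key_available` gives callers a way to fail fast before spending provider calls. A minimal usage sketch, assuming a `ModelInfo` mapping with the `provider`, `model`, and `api_key_env` fields shown in the README's rubric excerpt (only `provider` is checked explicitly above; the rest is delegated to `_resolve_api_key`):

```python
from osmosis_ai.rubric_eval import MissingAPIKeyError, ensure_api_key_available

model_info = {
    "provider": "openai",             # a missing or blank provider raises TypeError
    "model": "gpt-5-mini",            # illustrative field, mirroring the README example
    "api_key_env": "OPENAI_API_KEY",  # illustrative field naming the env var to check
}

try:
    ensure_api_key_available(model_info)
except MissingAPIKeyError as exc:
    print(f"Cannot evaluate yet: {exc}")
```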
@@ -534,4 +489,10 @@ def evaluate_rubric(
     return result if return_details else result["score"]


-__all__ = [
+__all__ = [
+    "evaluate_rubric",
+    "ensure_api_key_available",
+    "ModelInfo",
+    "RewardRubricRunResult",
+    "MissingAPIKeyError",
+]
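With the expanded `__all__`, downstream code can import the new helper alongside the existing entry points:

```python
from osmosis_ai.rubric_eval import (
    MissingAPIKeyError,
    ModelInfo,
    RewardRubricRunResult,
    ensure_api_key_available,
    evaluate_rubric,
)
```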
osmosis_ai/utils.py
CHANGED
@@ -28,10 +28,6 @@ def osmosis_reward(func: Callable) -> Callable:
     sig = inspect.signature(func)
     params = list(sig.parameters.values())

-    # Check parameter count
-    if len(params) < 2 or len(params) > 3:
-        raise TypeError(f"Function {func.__name__} must have 2-3 parameters, got {len(params)}")
-
     # Check first parameter: solution_str: str
     if params[0].name != 'solution_str':
         raise TypeError(f"First parameter must be named 'solution_str', got '{params[0].name}'")
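After this change `osmosis_reward` no longer rejects functions on parameter count alone, but it still enforces the parameter contract, starting with the requirement that the first parameter be named `solution_str`. A hedged sketch of a conforming reward function, mirroring the `numeric_tolerance` signature from the README (any further checks the decorator performs beyond those visible in this hunk are assumed, not shown):

```python
from osmosis_ai.utils import osmosis_reward


@osmosis_reward
def exact_match(solution_str: str, ground_truth: str, extra_info: dict = None) -> float:
    # The first parameter must be named `solution_str`, or the decorator raises TypeError.
    return 1.0 if solution_str.strip() == ground_truth.strip() else 0.0
```

Calling `exact_match("42", "42")` would then return `1.0`, assuming the decorator returns the validated function as a callable.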
{osmosis_ai-0.2.2.dist-info → osmosis_ai-0.2.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: osmosis-ai
-Version: 0.2.2
+Version: 0.2.3
 Summary: A Python library for reward function validation with strict type enforcement.
 Author-email: Osmosis AI <jake@osmosis.ai>
 License: MIT License
@@ -29,9 +29,18 @@ Project-URL: Issues, https://github.com/Osmosis-AI/osmosis-sdk-python/issues
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
-Requires-Python: >=3.
+Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: PyYAML<7.0,>=6.0
+Requires-Dist: python-dotenv<2.0.0,>=0.1.0
+Requires-Dist: requests<3.0.0,>=2.0.0
+Requires-Dist: xxhash<4.0.0,>=3.0.0
+Requires-Dist: anthropic<0.50.0,>=0.36.0
+Requires-Dist: openai>=2.0.0
+Requires-Dist: google-genai>=1.0.0
+Requires-Dist: xai-sdk>=1.2.0
+Requires-Dist: tqdm<5.0.0,>=4.0.0
 Dynamic: license-file

 # osmosis-ai
@@ -44,6 +53,10 @@ A Python library that provides reward and rubric validation helpers for LLM appl
 pip install osmosis-ai
 ```

+Requires Python 3.9 or newer.
+
+This installs the Osmosis CLI and pulls in the required provider SDKs (`openai`, `anthropic`, `google-genai`, `xai-sdk`) along with supporting utilities such as `PyYAML`, `python-dotenv`, `requests`, and `xxhash`.
+
 For development:
 ```bash
 git clone https://github.com/Osmosis-AI/osmosis-sdk-python
@@ -211,6 +224,55 @@ def numeric_tolerance(solution_str: str, ground_truth: str, extra_info: dict = N

 - `examples/rubric_functions.py` demonstrates `evaluate_rubric` with OpenAI, Anthropic, Gemini, and xAI using the schema-enforced SDK integrations.
 - `examples/reward_functions.py` keeps local reward helpers that showcase the decorator contract without external calls.
+- `examples/rubric_configs.yaml` bundles two rubric definitions, each with its own provider configuration and extra prompt context.
+- `examples/sample_data.jsonl` contains two conversation payloads mapped to those rubrics so you can trial dataset validation.
+
+```yaml
+# examples/rubric_configs.yaml (excerpt)
+version: 1
+rubrics:
+  - id: support_followup
+    model_info:
+      provider: openai
+      model: gpt-5-mini
+      api_key_env: OPENAI_API_KEY
+```
+
+```jsonl
+{"conversation_id": "ticket-001", "rubric_id": "support_followup", "...": "..."}
+{"conversation_id": "ticket-047", "rubric_id": "policy_grounding", "...": "..."}
+```
+
+## CLI Tools
+
+Installing the SDK also provides a lightweight CLI available as `osmosis` (aliases: `osmosis_ai`, `osmosis-ai`) for inspecting rubric YAML files and JSONL test payloads.
+
+Preview a rubric file and print every configuration discovered, including nested entries:
+
+```bash
+osmosis preview --path path/to/rubric.yaml
+```
+
+Preview a dataset of chat transcripts stored as JSONL:
+
+```bash
+osmosis preview --path path/to/data.jsonl
+```
+
+Evaluate a dataset against a hosted rubric configuration and print the returned scores:
+
+```bash
+osmosis eval --rubric support_followup --data examples/sample_data.jsonl
+```
+
+- Supply the dataset with `-d`/`--data path/to/data.jsonl`; the path is resolved relative to the current working directory.
+- Use `--config path/to/rubric_configs.yaml` when the rubric definitions are not located alongside the dataset.
+- Pass `-n`/`--number` to sample the provider multiple times per record; the CLI prints every run along with aggregate statistics (average, variance, standard deviation, and min/max).
+- Provide `--output path/to/dir` to create the directory (if needed) and emit `rubric_eval_result_<unix_timestamp>.json`, or supply a full file path (any extension) to control the filename; each file captures every run, provider payloads, timestamps, and aggregate statistics for downstream analysis.
+- Skip `--output` to collect results under `~/.cache/osmosis/eval_result/<rubric_id>/rubric_eval_result_<identifier>.json`; the CLI writes this JSON whether the evaluation finishes cleanly or hits provider/runtime errors so you can inspect failures later (only a manual Ctrl+C interrupt leaves no file behind).
+- Dataset rows whose `rubric_id` does not match the requested rubric are skipped automatically.
+
+Both commands validate the file, echo a short summary (`Loaded <n> ...`), and pretty-print the parsed records so you can confirm that new rubrics or test fixtures look correct before committing them. Invalid files raise a descriptive error and exit with a non-zero status code.

 ## Running Examples

osmosis_ai-0.2.3.dist-info/RECORD
ADDED
@@ -0,0 +1,27 @@
+osmosis_ai/__init__.py,sha256=2_qXxu18Yc7UicqxFZds8PjR4q0mTY1Xt17iR38OFbw,725
+osmosis_ai/cli.py,sha256=EPCttBnj1TEqQuO2gmS9iHadYcudiizVM38jACztRFE,1320
+osmosis_ai/cli_commands.py,sha256=CmTcb5N3delW7z3fwucss89xw5MHgIrJJ2Z5xdAuIeU,6165
+osmosis_ai/consts.py,sha256=-NDo9FaqBTebkCnhiFDxne6BY0W7BL3oM8HnGQDDgSE,73
+osmosis_ai/rubric_eval.py,sha256=PE2MvJygMbxelsJSTRzlW0bf-YUrtc8lCh6iTpHkjnU,17029
+osmosis_ai/rubric_types.py,sha256=kJvNAjLd3Y-1Q-_Re9HLTprLAUO3qtwR-IWOBeMkFI8,1279
+osmosis_ai/utils.py,sha256=IfTicRfa2Ybut4OzV4pHGSLBv-sGcmdT4eKIrIq4Pj8,19758
+osmosis_ai/cli_services/__init__.py,sha256=QQBwlI4KXoXK1X_e7kwW5sAVSh1VqBVuPllCwUOGXDM,1534
+osmosis_ai/cli_services/config.py,sha256=5hW2taAMhO9BkOfXvUCclnKLKtGTPaytS0oAgxCqymY,13965
+osmosis_ai/cli_services/dataset.py,sha256=qA0WHuOlJZCZdDbFX7ltimaZ3ujZpVUah4pxMvd4lVk,9042
+osmosis_ai/cli_services/engine.py,sha256=DbdJ24e5njk_lihe00cUgLO7yyJ6YgekFy6MAg_uq0k,9157
+osmosis_ai/cli_services/errors.py,sha256=nI6jlICyA4MMNKmwDHQBwyJVah5PVwstmra1HpGkVLE,136
+osmosis_ai/cli_services/reporting.py,sha256=H2g0BmEE2stVey4RmurQM713VowH8984a9r7oDstSkA,12499
+osmosis_ai/cli_services/session.py,sha256=Ru3HA80eqRYZGD1e38N8yd96FiAY8cIYpJvEOHKakM0,6597
+osmosis_ai/cli_services/shared.py,sha256=PilPfW5oDvNL5VG8oObSq2ZL35QPFmhBDf0V4gfd2Ro,5942
+osmosis_ai/providers/__init__.py,sha256=yLSExLbJToZ8AUOVxt4LDplxtIuwv-etSJJyZOcOE2Q,927
+osmosis_ai/providers/anthropic_provider.py,sha256=zrWCVP8co4v8xhcJDFLASwvwEADKN-1p34cY_GH4q5M,3758
+osmosis_ai/providers/base.py,sha256=fN5cnWXYAHN53RR_x6ykbUkM4bictNPDj4U8yd4b2a0,1492
+osmosis_ai/providers/gemini_provider.py,sha256=QANSCmkKungpkpDP2RClmKYnwNVrGv3MKxJwkh68IhY,12045
+osmosis_ai/providers/openai_family.py,sha256=DeQWPMcafEvG4xcI97m3AADTKP2pYw9KwcQTcQg-h_4,26078
+osmosis_ai/providers/shared.py,sha256=dmVe8JDgafPmo6HkP-Kl0aWfffhAT6u3ElV_wLlYD34,2957
+osmosis_ai-0.2.3.dist-info/licenses/LICENSE,sha256=FV2ZmyhdCYinoLLvU_ci-7pZ3DeNYY9XqZjVjOd3h94,1064
+osmosis_ai-0.2.3.dist-info/METADATA,sha256=tgRpinJ60KxX1OlT_9JMCFl2hY0A5I4rZLPRtXQ4p-U,13669
+osmosis_ai-0.2.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+osmosis_ai-0.2.3.dist-info/entry_points.txt,sha256=aF1CR36a9I9_vcF7nlK9JnK1Iqu614vPy2_jh4QU26A,114
+osmosis_ai-0.2.3.dist-info/top_level.txt,sha256=UPNRTKIBSrxsJVNxwXnLCqSoBS4bAiL_3jMtjvf5zEY,11
+osmosis_ai-0.2.3.dist-info/RECORD,,
osmosis_ai-0.2.2.dist-info/RECORD
REMOVED
@@ -1,16 +0,0 @@
-osmosis_ai/__init__.py,sha256=2_qXxu18Yc7UicqxFZds8PjR4q0mTY1Xt17iR38OFbw,725
-osmosis_ai/consts.py,sha256=-NDo9FaqBTebkCnhiFDxne6BY0W7BL3oM8HnGQDDgSE,73
-osmosis_ai/rubric_eval.py,sha256=bFgxgnbQeD-7K2LkTJfnSk5aG9s4lefLfmvQt4GQSnM,18332
-osmosis_ai/rubric_types.py,sha256=kJvNAjLd3Y-1Q-_Re9HLTprLAUO3qtwR-IWOBeMkFI8,1279
-osmosis_ai/utils.py,sha256=yjC_oQt1wwTJsX7lCx0ZGMa5txHURByuBDuU37WPAO0,19927
-osmosis_ai/providers/__init__.py,sha256=yLSExLbJToZ8AUOVxt4LDplxtIuwv-etSJJyZOcOE2Q,927
-osmosis_ai/providers/anthropic_provider.py,sha256=zrWCVP8co4v8xhcJDFLASwvwEADKN-1p34cY_GH4q5M,3758
-osmosis_ai/providers/base.py,sha256=fN5cnWXYAHN53RR_x6ykbUkM4bictNPDj4U8yd4b2a0,1492
-osmosis_ai/providers/gemini_provider.py,sha256=xqklXRO5K1YZ4SKq5lfU3bDUaF8QN2MIBP4DHGKwLVo,10611
-osmosis_ai/providers/openai_family.py,sha256=DeQWPMcafEvG4xcI97m3AADTKP2pYw9KwcQTcQg-h_4,26078
-osmosis_ai/providers/shared.py,sha256=dmVe8JDgafPmo6HkP-Kl0aWfffhAT6u3ElV_wLlYD34,2957
-osmosis_ai-0.2.2.dist-info/licenses/LICENSE,sha256=FV2ZmyhdCYinoLLvU_ci-7pZ3DeNYY9XqZjVjOd3h94,1064
-osmosis_ai-0.2.2.dist-info/METADATA,sha256=MPovk4NSQ_viTMd-zx1lp7Uo2EGB3EotjftcNddy4HU,10448
-osmosis_ai-0.2.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-osmosis_ai-0.2.2.dist-info/top_level.txt,sha256=UPNRTKIBSrxsJVNxwXnLCqSoBS4bAiL_3jMtjvf5zEY,11
-osmosis_ai-0.2.2.dist-info/RECORD,,
{osmosis_ai-0.2.2.dist-info → osmosis_ai-0.2.3.dist-info}/WHEEL: file without changes
{osmosis_ai-0.2.2.dist-info → osmosis_ai-0.2.3.dist-info}/licenses/LICENSE: file without changes
{osmosis_ai-0.2.2.dist-info → osmosis_ai-0.2.3.dist-info}/top_level.txt: file without changes