rakam-eval-sdk 0.2.4rc3__tar.gz → 0.2.4rc5__tar.gz

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: rakam-eval-sdk
- Version: 0.2.4rc3
+ Version: 0.2.4rc5
  Summary: Evaluation Framework SDK
  Author: Mohamed Bachar Touil
  License: MIT
@@ -4,7 +4,7 @@ build-backend = "uv_build"

  [project]
  name = "rakam-eval-sdk"
- version = "0.2.4rc3"
+ version = "0.2.4rc5"
  description = "Evaluation Framework SDK"
  readme = "README.md"
  requires-python = ">=3.8"
@@ -6,7 +6,7 @@ import uuid
  from datetime import datetime
  from pathlib import Path
  from pprint import pprint
- from typing import Any, Dict, List, Optional, Tuple, Union
+ from typing import Any, Dict, List, Optional, Tuple, Union, Set

  import typer
  from dotenv import load_dotenv
@@ -37,14 +37,14 @@ metrics_app = typer.Typer(help="Metrics utilities")
  app.add_typer(metrics_app, name="metrics")


- def extract_metric_names(config: Any) -> list[tuple[str, Optional[str]]]:
+ def extract_metric_names(config: Any) -> List[Tuple[str, Optional[str]]]:
      """
      Returns [(type, name)] from EvalConfig / SchemaEvalConfig
      """
      if not hasattr(config, "metrics"):
          return []

-     results: list[tuple[str, Optional[str]]] = []
+     results: List[Tuple[str, Optional[str]]] = []

      for metric in config.metrics or []:
          metric_type = getattr(metric, "type", None)
@@ -77,7 +77,7 @@ def metrics(
      files = directory.rglob("*.py") if recursive else directory.glob("*.py")
      TARGET_DECORATOR = eval_run.__name__

-     all_metrics: set[tuple[str, Optional[str]]] = set()
+     all_metrics: Set[Tuple[str, Optional[str]]] = set()
      found_any = False

      for file in sorted(files):
@@ -537,7 +537,7 @@ def compare(
          "--tag",
          help="Label identifying a reference testcase",
      ),
-     run: list[int] = typer.Option(
+     run: List[int] = typer.Option(
          [],
          "--run",
          help="Run ID identifying an evaluation testcase",
@@ -113,7 +113,6 @@ class TextInputItem(InputItem):

  class SchemaInputItem(InputItem):
      expected_output: Optional[str] = None
-     # retrieval_context: list[Json[Any]] = None


  class EvalConfig(BaseModel):
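
The annotation changes above are consistent with the package's requires-python = ">=3.8" constraint: subscripting built-in types such as list[int] (PEP 585) is only supported on Python 3.9+, so on 3.8 the typing aliases must be used instead. A minimal sketch of the difference, assuming no "from __future__ import annotations" in the module (the function name is illustrative, not part of the package):

    from typing import List, Optional, Tuple

    # Works on Python 3.8: the typing aliases are subscriptable.
    def metric_names() -> List[Tuple[str, Optional[str]]]:
        return []

    # On Python 3.8 this variant raises "TypeError: 'type' object is not
    # subscriptable" when the module is imported, because the annotation is
    # evaluated at function-definition time:
    # def metric_names() -> list[tuple[str, Optional[str]]]:
    #     return []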