scorebook-0.0.13-py3-none-any.whl → scorebook-0.0.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. scorebook/__init__.py +10 -5
  2. scorebook/cli/auth.py +1 -1
  3. scorebook/dashboard/__init__.py +1 -0
  4. scorebook/dashboard/create_project.py +91 -0
  5. scorebook/{trismik → dashboard}/credentials.py +24 -9
  6. scorebook/{trismik → dashboard}/upload_results.py +1 -1
  7. scorebook/eval_datasets/__init__.py +0 -4
  8. scorebook/eval_datasets/eval_dataset.py +4 -2
  9. scorebook/evaluate/__init__.py +1 -15
  10. scorebook/evaluate/_async/evaluate_async.py +9 -8
  11. scorebook/evaluate/_sync/evaluate.py +9 -8
  12. scorebook/evaluate/evaluate_helpers.py +4 -3
  13. scorebook/inference/__init__.py +1 -11
  14. scorebook/inference/clients/__init__.py +1 -8
  15. scorebook/inference/inference_pipeline.py +1 -1
  16. scorebook/metrics/__init__.py +1 -18
  17. scorebook/metrics/metric_registry.py +2 -0
  18. scorebook/score/__init__.py +0 -5
  19. scorebook/score/_async/score_async.py +3 -2
  20. scorebook/score/_sync/score.py +3 -2
  21. scorebook/score/score_helpers.py +1 -1
  22. scorebook/types.py +1 -1
  23. scorebook/utils/__init__.py +0 -22
  24. scorebook/utils/common_helpers.py +1 -1
  25. scorebook/utils/mock_llm/__init__.py +41 -0
  26. scorebook/utils/mock_llm/data/mock_llm_data.json +21970 -0
  27. scorebook-0.0.14.dist-info/METADATA +292 -0
  28. scorebook-0.0.14.dist-info/RECORD +53 -0
  29. scorebook/trismik/__init__.py +0 -10
  30. scorebook-0.0.13.dist-info/METADATA +0 -389
  31. scorebook-0.0.13.dist-info/RECORD +0 -50
  32. {scorebook-0.0.13.dist-info → scorebook-0.0.14.dist-info}/WHEEL +0 -0
  33. {scorebook-0.0.13.dist-info → scorebook-0.0.14.dist-info}/entry_points.txt +0 -0
  34. {scorebook-0.0.13.dist-info → scorebook-0.0.14.dist-info}/licenses/LICENSE +0 -0
scorebook/__init__.py CHANGED
@@ -9,12 +9,15 @@ import importlib.metadata
 # get version from pyproject.toml
 __version__ = importlib.metadata.version(__package__ or __name__)
 
-from scorebook.eval_datasets import EvalDataset
-from scorebook.evaluate import evaluate, evaluate_async
+from scorebook.dashboard.create_project import create_project, create_project_async
+from scorebook.dashboard.credentials import login, logout, whoami
+from scorebook.dashboard.upload_results import upload_result, upload_result_async
+from scorebook.eval_datasets.eval_dataset import EvalDataset
+from scorebook.evaluate._async.evaluate_async import evaluate_async
+from scorebook.evaluate._sync.evaluate import evaluate
 from scorebook.inference.inference_pipeline import InferencePipeline
-from scorebook.score import score, score_async
-from scorebook.trismik.credentials import login, logout, whoami
-from scorebook.trismik.upload_results import upload_result, upload_result_async
+from scorebook.score._async.score_async import score_async
+from scorebook.score._sync.score import score
 from scorebook.utils.render_template import render_template
 
 __all__ = [
@@ -28,6 +31,8 @@ __all__ = [
     "logout",
     "whoami",
     "InferencePipeline",
+    "create_project",
+    "create_project_async",
     "upload_result",
     "upload_result_async",
 ]
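Taken together, the __init__.py changes make the dashboard helpers first-class exports. A minimal sketch of the 0.0.14 top-level surface, assuming a valid Trismik API key (the key string and project name below are placeholders, not values from the package):

    from scorebook import create_project, login, whoami

    login("example-trismik-api-key")                  # placeholder key; stored after validation
    print(whoami())                                   # assumed to report the authenticated account
    project = create_project(name="example-project")  # created in the user's default team
    print(project.id)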
scorebook/cli/auth.py CHANGED
@@ -4,7 +4,7 @@ import argparse
 import getpass
 import sys
 
-from scorebook.trismik.credentials import get_stored_token, get_token_path, login, logout, whoami
+from scorebook.dashboard.credentials import get_stored_token, get_token_path, login, logout, whoami
 
 
 def auth_command(args: argparse.Namespace) -> int:
scorebook/dashboard/__init__.py ADDED
@@ -0,0 +1 @@
+"""Trismik authentication and API integration."""
scorebook/dashboard/create_project.py ADDED
@@ -0,0 +1,91 @@
+"""Create projects in Trismik's experimentation platform."""
+
+import logging
+from typing import Optional
+
+from trismik.types import TrismikProject
+
+from scorebook.evaluate.evaluate_helpers import (
+    create_trismik_async_client,
+    create_trismik_sync_client,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def create_project(
+    name: str,
+    team_id: Optional[str] = None,
+    description: Optional[str] = None,
+) -> TrismikProject:
+    """Create a new project in Trismik's experimentation platform (synchronous).
+
+    This function creates a new project that can be used to organize experiments
+    and evaluation runs in the Trismik platform.
+
+    Args:
+        name: Name of the project
+        team_id: Optional ID of the team to create the project in. If not provided,
+            the project will be created in the user's default team.
+        description: Optional description of the project
+
+    Returns:
+        TrismikProject: Created project object containing project details including
+            id, name, description, accountId, createdAt, and updatedAt fields
+
+    Raises:
+        TrismikValidationError: If the request fails validation
+        TrismikApiError: If the API request fails
+    """
+    # Create Trismik client
+    trismik_client = create_trismik_sync_client()
+
+    # Create project via Trismik API
+    project = trismik_client.create_project(
+        name=name,
+        team_id=team_id,
+        description=description,
+    )
+
+    logger.info(f"Project '{name}' created successfully with ID: {project.id}")
+
+    return project
+
+
+async def create_project_async(
+    name: str,
+    team_id: Optional[str] = None,
+    description: Optional[str] = None,
+) -> TrismikProject:
+    """Create a new project in Trismik's experimentation platform (asynchronous).
+
+    This function creates a new project that can be used to organize experiments
+    and evaluation runs in the Trismik platform.
+
+    Args:
+        name: Name of the project
+        team_id: Optional ID of the team to create the project in. If not provided,
+            the project will be created in the user's default team.
+        description: Optional description of the project
+
+    Returns:
+        TrismikProject: Created project object containing project details including
+            id, name, description, accountId, createdAt, and updatedAt fields
+
+    Raises:
+        TrismikValidationError: If the request fails validation
+        TrismikApiError: If the API request fails
+    """
+    # Create Trismik async client
+    trismik_client = create_trismik_async_client()
+
+    # Create project via Trismik API (async)
+    project = await trismik_client.create_project(
+        name=name,
+        team_id=team_id,
+        description=description,
+    )
+
+    logger.info(f"Project '{name}' created successfully with ID: {project.id}")
+
+    return project
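For completeness, the async variant can be driven the same way; this sketch assumes Trismik credentials are already configured and uses illustrative names:

    import asyncio

    from scorebook import create_project_async

    async def main() -> None:
        project = await create_project_async(
            name="nightly-evals",           # illustrative project name
            description="Created from CI",  # team_id omitted, so the default team is used
        )
        print(project.id, project.name)

    asyncio.run(main())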
scorebook/{trismik → dashboard}/credentials.py RENAMED
@@ -5,6 +5,10 @@ import os
 import pathlib
 from typing import Optional
 
+from trismik import TrismikClient
+
+from scorebook.settings import TRISMIK_SERVICE_URL
+
 logger = logging.getLogger(__name__)
 
 
@@ -66,15 +70,26 @@ def get_token() -> Optional[str]:
 
 
 def validate_token(token: str) -> bool:
-    """Validate the token by making a test API call to trismik."""
-    # TODO: Implement actual API validation once you have an endpoint
-    # This would typically make a request to something like:
-    # response = requests.get("https://api.trismik.com/whoami",
-    #                         headers={"Authorization": f"Bearer {token}"})
-    # return response.status_code == 200
-
-    # For now, just check it's not empty
-    return bool(token and token.strip())
+    """Validate the token by making a test API call to trismik.
+
+    Args:
+        token: The API token to validate.
+
+    Returns:
+        bool: True if the token is valid, False otherwise.
+    """
+    if not token or not token.strip():
+        return False
+
+    try:
+        # Create a client with the token and verify it works
+        client = TrismikClient(service_url=TRISMIK_SERVICE_URL, api_key=token)
+        client.me()
+        client.close()
+        return True
+    except Exception as e:
+        logger.debug(f"Token validation failed: {e}")
+        return False
 
 
 def login(trismik_api_key: str) -> None:
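With this change, validate_token goes from a non-empty-string check to a real round trip through TrismikClient.me(). A hedged sketch of the new behaviour from the caller's side (the key value is a placeholder; any non-empty token triggers a network call):

    from scorebook.dashboard.credentials import validate_token

    assert validate_token("") is False      # rejected locally, no API call
    ok = validate_token("example-api-key")  # placeholder key; verified via TrismikClient.me()
    print("valid token" if ok else "invalid token or API unreachable")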
scorebook/{trismik → dashboard}/upload_results.py RENAMED
@@ -21,7 +21,7 @@ logger = logging.getLogger(__name__)
 
 # Known fields that are not metrics or hyperparameters
 KNOWN_AGGREGATE_FIELDS = {"dataset", "run_id", "run_completed"}
-KNOWN_ITEM_FIELDS = {"id", "dataset_name", "input", "output", "label", "run_id"}
+KNOWN_ITEM_FIELDS = {"id", "dataset", "input", "output", "label", "run_id"}
 
 
 def upload_result(
scorebook/eval_datasets/__init__.py CHANGED
@@ -1,5 +1 @@
 """Dataset utilities for scorebook."""
-
-from scorebook.eval_datasets.eval_dataset import EvalDataset
-
-__all__ = ["EvalDataset"]
scorebook/eval_datasets/eval_dataset.py CHANGED
@@ -18,8 +18,10 @@ from scorebook.exceptions import (
     DatasetSampleError,
     MissingFieldError,
 )
-from scorebook.metrics import MetricBase, MetricRegistry
-from scorebook.utils import render_template, validate_path
+from scorebook.metrics.metric_base import MetricBase
+from scorebook.metrics.metric_registry import MetricRegistry
+from scorebook.utils.io_helpers import validate_path
+from scorebook.utils.render_template import render_template
 
 
 class EvalDataset:
@@ -1,15 +1 @@
1
- """
2
- Evaluation module for Scorebook.
3
-
4
- This module provides both synchronous and asynchronous evaluation functions.
5
- The async version serves as the source of truth, with the sync version
6
- automatically generated using unasync.
7
- """
8
-
9
- # Import from async module
10
- from ._async.evaluate_async import evaluate_async
11
-
12
- # Import from generated sync module
13
- from ._sync.evaluate import evaluate
14
-
15
- __all__ = ["evaluate", "evaluate_async"]
1
+ """Evaluation module for Scorebook."""
scorebook/evaluate/_async/evaluate_async.py CHANGED
@@ -6,7 +6,7 @@ from trismik import TrismikAsyncClient, TrismikClient
 from trismik.settings import evaluation_settings
 from trismik.types import TrismikRunMetadata
 
-from scorebook.eval_datasets import EvalDataset
+from scorebook.eval_datasets.eval_dataset import EvalDataset
 from scorebook.evaluate.evaluate_helpers import (
     build_eval_run_specs,
     create_trismik_async_client,
@@ -28,12 +28,9 @@ from scorebook.types import (
     EvalResult,
     EvalRunSpec,
 )
-from scorebook.utils import (
-    async_nullcontext,
-    evaluation_progress_context,
-    resolve_show_progress,
-    resolve_upload_results,
-)
+from scorebook.utils.async_utils import async_nullcontext
+from scorebook.utils.common_helpers import resolve_show_progress, resolve_upload_results
+from scorebook.utils.progress_bars import evaluation_progress_context
 
 logger = logging.getLogger(__name__)
 
@@ -392,6 +389,10 @@ async def run_adaptive_evaluation(
         available_splits=available_splits,
     )
 
+    # Create inference function with bound hyperparameters
+    async def inference_with_hyperparams(items: Any) -> Any:
+        return await inference(items, **adaptive_run_spec.hyperparameter_config)
+
     trismik_results = await trismik_client.run(
         test_id=adaptive_run_spec.dataset,
         split=resolved_split,
@@ -402,7 +403,7 @@
             test_configuration={},
             inference_setup={},
         ),
-        item_processor=make_trismik_inference(inference),
+        item_processor=make_trismik_inference(inference_with_hyperparams),
         return_dict=False,
     )
 
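The added inference_with_hyperparams closure is the behavioural change here: adaptive runs now forward the run's hyperparameter configuration (analogous to adaptive_run_spec.hyperparameter_config above) to the user's inference callable instead of calling it bare. A minimal sketch of the same pattern in isolation; my_inference and the config dict below are illustrative stand-ins, not names from the package:

    import asyncio
    from typing import Any, Dict, List

    # Stand-in for a user-supplied inference callable (illustrative only).
    async def my_inference(items: List[Any], **hyperparameters: Any) -> List[str]:
        temperature = hyperparameters.get("temperature", 0.0)
        return [f"answer@{temperature}" for _ in items]

    # One run's hyperparameter config.
    hyperparameter_config: Dict[str, Any] = {"temperature": 0.7}

    # Bind the config so the item processor only has to pass items through.
    async def inference_with_hyperparams(items: Any) -> Any:
        return await my_inference(items, **hyperparameter_config)

    print(asyncio.run(inference_with_hyperparams([{"id": "x"}])))  # ['answer@0.7']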
scorebook/evaluate/_sync/evaluate.py CHANGED
@@ -5,7 +5,7 @@ from trismik import TrismikAsyncClient, TrismikClient
 from trismik.settings import evaluation_settings
 from trismik.types import TrismikRunMetadata
 
-from scorebook.eval_datasets import EvalDataset
+from scorebook.eval_datasets.eval_dataset import EvalDataset
 from scorebook.evaluate.evaluate_helpers import (
     build_eval_run_specs,
     create_trismik_sync_client,
@@ -27,12 +27,9 @@ from scorebook.types import (
     EvalResult,
     EvalRunSpec,
 )
-from scorebook.utils import (
-    nullcontext,
-    evaluation_progress_context,
-    resolve_show_progress,
-    resolve_upload_results,
-)
+from contextlib import nullcontext
+from scorebook.utils.common_helpers import resolve_show_progress, resolve_upload_results
+from scorebook.utils.progress_bars import evaluation_progress_context
 
 logger = logging.getLogger(__name__)
 
@@ -391,6 +388,10 @@ def run_adaptive_evaluation(
         available_splits=available_splits,
     )
 
+    # Create inference function with bound hyperparameters
+    def inference_with_hyperparams(items: Any) -> Any:
+        return inference(items, **adaptive_run_spec.hyperparameter_config)
+
     trismik_results = trismik_client.run(
         test_id=adaptive_run_spec.dataset,
         split=resolved_split,
@@ -401,7 +402,7 @@
             test_configuration={},
             inference_setup={},
         ),
-        item_processor=make_trismik_inference(inference),
+        item_processor=make_trismik_inference(inference_with_hyperparams),
         return_dict=False,
    )
 
scorebook/evaluate/evaluate_helpers.py CHANGED
@@ -9,7 +9,8 @@ from trismik._async.client import TrismikAsyncClient
 from trismik._sync.client import TrismikClient
 from trismik.types import TrismikMultipleChoiceTextItem
 
-from scorebook import EvalDataset
+from scorebook.dashboard.credentials import get_token
+from scorebook.eval_datasets.eval_dataset import EvalDataset
 from scorebook.exceptions import (
     DataMismatchError,
     MetricComputationError,
@@ -17,9 +18,9 @@ from scorebook.exceptions import (
     ScoreBookError,
 )
 from scorebook.settings import TRISMIK_SERVICE_URL
-from scorebook.trismik.credentials import get_token
 from scorebook.types import AdaptiveEvalDataset, AdaptiveEvalRunSpec, EvalResult, EvalRunSpec
-from scorebook.utils import expand_dict, is_awaitable
+from scorebook.utils.async_utils import is_awaitable
+from scorebook.utils.transform_helpers import expand_dict
 
 logger = logging.getLogger(__name__)
 
@@ -1,11 +1 @@
1
- """
2
- Inference module for model execution and predictions.
3
-
4
- This module provides functionality for running inference with various models
5
- and processing their responses. It includes utilities for both single and
6
- batch inference operations.
7
- """
8
-
9
- from scorebook.inference.inference_pipeline import InferencePipeline
10
-
11
- __all__ = ["InferencePipeline"]
1
+ """Inference module for model execution and predictions."""
@@ -1,8 +1 @@
1
- """
2
- Inference clients for various LLM providers.
3
-
4
- This module provides client implementations for different LLM providers including
5
- OpenAI, AWS Bedrock, Google Vertex AI, and Portkey.
6
- """
7
-
8
- __all__ = ["bedrock", "openai", "portkey", "vertex"]
1
+ """Inference clients for various LLM providers."""
scorebook/inference/inference_pipeline.py CHANGED
@@ -9,7 +9,7 @@ configurable way.
 import asyncio
 from typing import Any, Callable, Dict, List, Optional, cast
 
-from scorebook.utils import is_awaitable
+from scorebook.utils.async_utils import is_awaitable
 
 
 class InferencePipeline:
@@ -1,18 +1 @@
1
- """
2
- Metrics for evaluating model predictions.
3
-
4
- This module provides a collection of evaluation metrics for comparing model outputs
5
- against ground truth labels. Available metrics include standard classification and
6
- generation metrics like accuracy, precision, recall, F1-score, etc.
7
-
8
- Metrics can be accessed by name through the `get_metrics()` function or used
9
- directly by instantiating specific metric classes. All metrics implement a
10
- common interface for scoring predictions against references.
11
- """
12
-
13
- from scorebook.metrics.accuracy import Accuracy
14
- from scorebook.metrics.metric_base import MetricBase
15
- from scorebook.metrics.metric_registry import MetricRegistry
16
- from scorebook.metrics.precision import Precision
17
-
18
- __all__ = ["MetricBase", "Precision", "Accuracy", "MetricRegistry"]
1
+ """Metrics for evaluating model predictions."""
scorebook/metrics/metric_registry.py CHANGED
@@ -85,8 +85,10 @@ class MetricRegistry:
         # If input is a string, look up the class in the registry
         if isinstance(name_or_class, str):
             key = name_or_class.lower()
+
             if key not in cls._registry:
                 raise ValueError(f"Metric '{name_or_class}' not registered.")
+
             return cls._registry[key](**kwargs)
 
         raise ValueError(
scorebook/score/__init__.py CHANGED
@@ -1,6 +1 @@
 """Score module for computing metrics on pre-computed outputs."""
-
-from scorebook.score._async.score_async import score_async
-from scorebook.score._sync.score import score
-
-__all__ = ["score", "score_async"]
scorebook/score/_async/score_async.py CHANGED
@@ -1,6 +1,7 @@
 import logging
 from typing import Any, Dict, List, Literal, Optional, Union, cast
 
+from scorebook.dashboard.upload_results import upload_result_async
 from scorebook.exceptions import DataMismatchError, ParameterValidationError
 from scorebook.score.score_helpers import (
     calculate_metric_scores_async,
@@ -8,9 +9,9 @@ from scorebook.score.score_helpers import (
     resolve_metrics,
     validate_items,
 )
-from scorebook.trismik.upload_results import upload_result_async
 from scorebook.types import Metrics
-from scorebook.utils import resolve_show_progress, resolve_upload_results, scoring_progress_context
+from scorebook.utils.common_helpers import resolve_show_progress, resolve_upload_results
+from scorebook.utils.progress_bars import scoring_progress_context
 
 logger = logging.getLogger(__name__)
 
@@ -1,6 +1,7 @@
1
1
  import logging
2
2
  from typing import Any, Dict, List, Literal, Optional, Union, cast
3
3
 
4
+ from scorebook.dashboard.upload_results import upload_result
4
5
  from scorebook.exceptions import DataMismatchError, ParameterValidationError
5
6
  from scorebook.score.score_helpers import (
6
7
  calculate_metric_scores,
@@ -8,9 +9,9 @@ from scorebook.score.score_helpers import (
8
9
  resolve_metrics,
9
10
  validate_items,
10
11
  )
11
- from scorebook.trismik.upload_results import upload_result
12
12
  from scorebook.types import Metrics
13
- from scorebook.utils import resolve_show_progress, resolve_upload_results, scoring_progress_context
13
+ from scorebook.utils.common_helpers import resolve_show_progress, resolve_upload_results
14
+ from scorebook.utils.progress_bars import scoring_progress_context
14
15
 
15
16
  logger = logging.getLogger(__name__)
16
17
 
scorebook/score/score_helpers.py CHANGED
@@ -7,7 +7,7 @@ from scorebook.exceptions import DataMismatchError, ParameterValidationError
 from scorebook.metrics.metric_base import MetricBase
 from scorebook.metrics.metric_registry import MetricRegistry
 from scorebook.types import MetricScore
-from scorebook.utils import is_awaitable
+from scorebook.utils.async_utils import is_awaitable
 
 logger = logging.getLogger(__name__)
 
scorebook/types.py CHANGED
@@ -3,7 +3,7 @@
 from dataclasses import dataclass
 from typing import Any, Dict, List, Optional, Sequence, Type, Union
 
-from scorebook.eval_datasets import EvalDataset
+from scorebook.eval_datasets.eval_dataset import EvalDataset
 from scorebook.metrics.metric_base import MetricBase
 
 # Type alias for metrics parameter
@@ -1,23 +1 @@
1
1
  """Utility functions and common helpers for the Scorebook framework."""
2
-
3
- from contextlib import nullcontext
4
-
5
- from scorebook.utils.async_utils import async_nullcontext, is_awaitable
6
- from scorebook.utils.common_helpers import resolve_show_progress, resolve_upload_results
7
- from scorebook.utils.io_helpers import validate_path
8
- from scorebook.utils.progress_bars import evaluation_progress_context, scoring_progress_context
9
- from scorebook.utils.render_template import render_template
10
- from scorebook.utils.transform_helpers import expand_dict
11
-
12
- __all__ = [
13
- "async_nullcontext",
14
- "nullcontext",
15
- "is_awaitable",
16
- "resolve_show_progress",
17
- "resolve_upload_results",
18
- "validate_path",
19
- "expand_dict",
20
- "evaluation_progress_context",
21
- "scoring_progress_context",
22
- "render_template",
23
- ]
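Code that previously imported these helpers from scorebook.utils now has to target the concrete submodules. The replacement imports, collected from the hunks above:

    from contextlib import nullcontext

    from scorebook.utils.async_utils import async_nullcontext, is_awaitable
    from scorebook.utils.common_helpers import resolve_show_progress, resolve_upload_results
    from scorebook.utils.io_helpers import validate_path
    from scorebook.utils.progress_bars import evaluation_progress_context, scoring_progress_context
    from scorebook.utils.render_template import render_template
    from scorebook.utils.transform_helpers import expand_dict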
scorebook/utils/common_helpers.py CHANGED
@@ -17,7 +17,7 @@ def resolve_upload_results(upload_results: Union[Literal["auto"], bool]) -> bool
         bool: Whether to upload results to Trismik
     """
     if upload_results == "auto":
-        from scorebook.trismik.credentials import get_token
+        from scorebook.dashboard.credentials import get_token
 
         upload_results = get_token() is not None
         logger.debug("Auto upload results resolved to: %s", upload_results)
scorebook/utils/mock_llm/__init__.py ADDED
@@ -0,0 +1,41 @@
+"""Mock LLM utilities for testing and demonstrations."""
+
+import json
+import random
+from pathlib import Path
+from typing import Any, List
+
+# Load the mock data once at module initialization
+_DATA_PATH = Path(__file__).parent / "data" / "mock_llm_data.json"
+with open(_DATA_PATH, "r", encoding="utf-8") as f:
+    _MOCK_DATA = json.load(f)
+
+
+def mock_llm(inputs: List[Any], **hyperparameters: Any) -> List[str]:
+    """Mock LLM that returns answers based on pre-recorded accuracy data."""
+
+    results = []
+    all_choices = ["A", "B", "C", "D", "E"]
+
+    for item in inputs:
+        item_id = item["id"]
+
+        # Look up the item in our mock data
+        if item_id not in _MOCK_DATA:
+            # If item not found, return random answer
+            results.append(random.choice(all_choices))
+            continue
+
+        item_data = _MOCK_DATA[item_id]
+        correct_answer = item_data["answer"]
+        was_accurate = item_data["accuracy"]
+
+        if was_accurate:
+            # Return the correct answer
+            results.append(correct_answer)
+        else:
+            # Return a random incorrect answer
+            incorrect_choices = [choice for choice in all_choices if choice != correct_answer]
+            results.append(random.choice(incorrect_choices))
+
+    return results
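The new mock LLM is a plain callable over item dicts carrying an "id" key; ids missing from the bundled data fall back to a random choice. A short hedged example (the ids below are made up, so only the fallback branch is exercised):

    from scorebook.utils.mock_llm import mock_llm

    items = [{"id": "example-item-1"}, {"id": "example-item-2"}]
    predictions = mock_llm(items, temperature=0.0)  # hyperparameters are accepted but unused
    print(predictions)                              # e.g. ['C', 'A']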