levelapp-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of levelapp might be problematic.
- levelapp/__init__.py +0 -0
- levelapp/aspects/__init__.py +8 -0
- levelapp/aspects/loader.py +253 -0
- levelapp/aspects/logger.py +59 -0
- levelapp/aspects/monitor.py +614 -0
- levelapp/aspects/sanitizer.py +168 -0
- levelapp/clients/__init__.py +119 -0
- levelapp/clients/anthropic.py +112 -0
- levelapp/clients/ionos.py +116 -0
- levelapp/clients/mistral.py +106 -0
- levelapp/clients/openai.py +102 -0
- levelapp/comparator/__init__.py +5 -0
- levelapp/comparator/comparator.py +232 -0
- levelapp/comparator/extractor.py +108 -0
- levelapp/comparator/schemas.py +61 -0
- levelapp/comparator/scorer.py +271 -0
- levelapp/comparator/utils.py +136 -0
- levelapp/config/__init__.py +5 -0
- levelapp/config/endpoint.py +190 -0
- levelapp/config/prompts.py +35 -0
- levelapp/core/__init__.py +0 -0
- levelapp/core/base.py +386 -0
- levelapp/core/session.py +214 -0
- levelapp/evaluator/__init__.py +3 -0
- levelapp/evaluator/evaluator.py +265 -0
- levelapp/metrics/__init__.py +67 -0
- levelapp/metrics/embedding.py +2 -0
- levelapp/metrics/exact.py +182 -0
- levelapp/metrics/fuzzy.py +80 -0
- levelapp/metrics/token.py +103 -0
- levelapp/plugins/__init__.py +0 -0
- levelapp/repository/__init__.py +3 -0
- levelapp/repository/firestore.py +282 -0
- levelapp/simulator/__init__.py +3 -0
- levelapp/simulator/schemas.py +89 -0
- levelapp/simulator/simulator.py +441 -0
- levelapp/simulator/utils.py +201 -0
- levelapp/workflow/__init__.py +5 -0
- levelapp/workflow/base.py +113 -0
- levelapp/workflow/factory.py +51 -0
- levelapp/workflow/registration.py +6 -0
- levelapp/workflow/schemas.py +121 -0
- levelapp-0.1.0.dist-info/METADATA +254 -0
- levelapp-0.1.0.dist-info/RECORD +46 -0
- levelapp-0.1.0.dist-info/WHEEL +4 -0
- levelapp-0.1.0.dist-info/licenses/LICENSE +0 -0

levelapp/workflow/base.py
@@ -0,0 +1,113 @@
import asyncio

from abc import ABC, abstractmethod
from pydantic import ValidationError
from functools import partial
from pathlib import Path
from typing import Any

from levelapp.core.base import BaseProcess
from levelapp.simulator.schemas import ScriptsBatch
from levelapp.simulator.simulator import ConversationSimulator
from levelapp.aspects.loader import DataLoader
from levelapp.workflow.schemas import WorkflowContext


class BaseWorkflow(ABC):
    """Abstract base class for evaluation workflows."""

    def __init__(self, name: str, context: WorkflowContext) -> None:
        self.name = name
        self.context = context
        self.process: BaseProcess | None = None
        self._input_data: Any | None = None
        self._results: Any | None = None
        self._initialized: bool = False

    def setup(self) -> None:
        """Validate and initialize workflow-specific settings."""
        if self._initialized:
            return
        self.process = self._setup_process(context=self.context)
        self._initialized = True

    def load_data(self) -> None:
        """Load and preprocess input data."""
        if not self._initialized:
            raise RuntimeError(f"[{self.name}] Workflow not initialized. Call setup() first.")
        self._input_data = self._load_input_data(context=self.context)

    def execute(self) -> None:
        """Run the workflow evaluation steps."""
        if not self._input_data:
            raise RuntimeError(f"[{self.name}] No reference data available.")

        if asyncio.iscoroutinefunction(self.process.run):
            self._results = asyncio.run(self.process.run(**self._input_data))
        else:
            self._results = self.process.run(**self._input_data)

    async def aexecute(self) -> None:
        """Run the workflow evaluation steps from within an existing event loop."""
        if not self._input_data:
            raise RuntimeError(f"[{self.name}] No reference data available.")

        if asyncio.iscoroutinefunction(self.process.run):
            self._results = await self.process.run(**self._input_data)
        else:
            loop = asyncio.get_running_loop()
            func = partial(self.process.run, **self._input_data)
            self._results = await loop.run_in_executor(None, func)

    def collect_results(self) -> Any:
        """Return unified results structure."""
        return self._results

    @abstractmethod
    def _setup_process(self, context: WorkflowContext) -> BaseProcess:
        raise NotImplementedError

    @abstractmethod
    def _load_input_data(self, context: WorkflowContext) -> Any:
        raise NotImplementedError


class SimulatorWorkflow(BaseWorkflow):
    def __init__(self, context: WorkflowContext) -> None:
        super().__init__(name="ConversationSimulator", context=context)

    def _setup_process(self, context: WorkflowContext) -> BaseProcess:
        simulator = ConversationSimulator()
        simulator.setup(
            repository=context.repository,
            evaluators=context.evaluators,
            endpoint_config=context.endpoint_config,
        )
        return simulator

    def _load_input_data(self, context: WorkflowContext) -> Any:
        loader = DataLoader()
        reference_data_path = context.inputs.get("reference_data_path", "no-path-provided")
        file_path = Path(reference_data_path)

        if not file_path.exists():
            raise FileNotFoundError(f"[{self.name}] Reference data file not found.")

        evaluation_params = context.inputs.get("evaluation_params", {})
        data_config = loader.load_raw_data(path=reference_data_path)
        try:
            scripts_batch = ScriptsBatch.model_validate(data_config)
        except ValidationError as e:
            raise RuntimeError(f"[{self.name}] Validation error: {e}") from e

        return {"test_batch": scripts_batch, "attempts": evaluation_params.get("attempts", 1)}


class ComparatorWorkflow(BaseWorkflow):
    def __init__(self, context: WorkflowContext) -> None:
        super().__init__(name="MetadataComparator", context=context)

    def _setup_process(self, context: WorkflowContext) -> BaseProcess:
        pass

    def _load_input_data(self, context: WorkflowContext) -> Any:
        pass
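
To illustrate the lifecycle this base class defines (setup → load_data → execute → collect_results), here is a minimal sketch of a custom workflow. `EchoProcess` and `EchoWorkflow` are hypothetical names, not part of the package; a real subclass would return an actual `BaseProcess` implementation from `_setup_process`.

```python
# Sketch only: BaseWorkflow just calls process.run(**input_data), so a
# duck-typed stand-in process is enough for illustration purposes.
from typing import Any

from levelapp.workflow.base import BaseWorkflow
from levelapp.workflow.schemas import WorkflowContext


class EchoProcess:
    """Hypothetical stand-in process: run() echoes back its keyword arguments."""

    def run(self, **kwargs: Any) -> Any:
        return kwargs


class EchoWorkflow(BaseWorkflow):
    def __init__(self, context: WorkflowContext) -> None:
        super().__init__(name="Echo", context=context)

    def _setup_process(self, context: WorkflowContext) -> "EchoProcess":
        return EchoProcess()

    def _load_input_data(self, context: WorkflowContext) -> Any:
        # BaseWorkflow.execute() unpacks this dict into process.run(**kwargs).
        return dict(context.inputs)
```

Such a subclass then plugs into the same setup/load_data/execute/collect_results sequence as the shipped workflows.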

levelapp/workflow/factory.py
@@ -0,0 +1,51 @@
from typing import Callable, Dict

from levelapp.workflow.schemas import WorkflowType, RepositoryType, EvaluatorType, WorkflowConfig, WorkflowContext
from levelapp.core.base import BaseRepository, BaseEvaluator
from levelapp.workflow.base import BaseWorkflow

from levelapp.repository.firestore import FirestoreRepository
from levelapp.evaluator.evaluator import JudgeEvaluator, MetadataEvaluator


class MainFactory:
    """Central factory for repositories, evaluators, and workflows."""

    _repository_map: dict[RepositoryType, Callable[[WorkflowConfig], BaseRepository]] = {
        RepositoryType.FIRESTORE: lambda cfg: FirestoreRepository(),
    }

    _evaluator_map: dict[EvaluatorType, Callable[[WorkflowConfig], BaseEvaluator]] = {
        EvaluatorType.JUDGE: lambda cfg: JudgeEvaluator(),
        EvaluatorType.REFERENCE: lambda cfg: MetadataEvaluator(),
        # Next up: the RAG evaluator.
    }

    _workflow_map: dict[WorkflowType, Callable[["WorkflowContext"], BaseWorkflow]] = {}

    @classmethod
    def create_repository(cls, config: WorkflowConfig) -> BaseRepository:
        fn = cls._repository_map.get(config.repository)
        if not fn:
            raise NotImplementedError(f"Repository {config.repository} not implemented")
        return fn(config)

    @classmethod
    def create_evaluator(cls, config: WorkflowConfig) -> Dict[EvaluatorType, BaseEvaluator]:
        evaluators: dict[EvaluatorType, BaseEvaluator] = {}
        for ev in config.evaluators:
            fn = cls._evaluator_map.get(ev)
            if not fn:
                raise NotImplementedError(f"Evaluator {ev} not implemented")
            evaluators[ev] = fn(config)
        return evaluators

    @classmethod
    def create_workflow(cls, wf_type: WorkflowType, context: "WorkflowContext") -> BaseWorkflow:
        fn = cls._workflow_map.get(wf_type)
        if not fn:
            raise NotImplementedError(f"Workflow {wf_type} not implemented")
        return fn(context)

    @classmethod
    def register_workflow(cls, wf_type: WorkflowType, builder: Callable[["WorkflowContext"], BaseWorkflow]) -> None:
        cls._workflow_map[wf_type] = builder

levelapp/workflow/registration.py
@@ -0,0 +1,6 @@
from levelapp.workflow.factory import MainFactory
from levelapp.workflow.schemas import WorkflowType
from levelapp.workflow.base import SimulatorWorkflow, ComparatorWorkflow

MainFactory.register_workflow(WorkflowType.SIMULATOR, lambda ctx: SimulatorWorkflow(ctx))
MainFactory.register_workflow(WorkflowType.COMPARATOR, lambda ctx: ComparatorWorkflow(ctx))
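
The factory and these registrations can be exercised by hand. The following is a minimal sketch, assuming a valid `workflow_config.yaml` on disk; it only uses the calls shown in the files above.

```python
# Sketch: wire a workflow together manually via MainFactory instead of a session.
import levelapp.workflow.registration  # noqa: F401  (runs the register_workflow calls above)

from levelapp.workflow.factory import MainFactory
from levelapp.workflow.schemas import WorkflowConfig, WorkflowContext

config = WorkflowConfig.load(path="workflow_config.yaml")

context = WorkflowContext(
    config=config,
    repository=MainFactory.create_repository(config),
    evaluators=MainFactory.create_evaluator(config),
    endpoint_config=config.endpoint_config,
    inputs=config.inputs,
)

workflow = MainFactory.create_workflow(config.workflow, context)
workflow.setup()        # builds the underlying process
workflow.load_data()    # loads and validates the reference data
workflow.execute()      # runs the evaluation (sync or async process)
print(workflow.collect_results())
```

In normal use, the `EvaluationSession` shown in the README below presumably drives these same steps for you.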

levelapp/workflow/schemas.py
@@ -0,0 +1,121 @@
from enum import Enum
from pydantic import BaseModel
from dataclasses import dataclass
from typing import List, Dict, Any

from levelapp.config.endpoint import EndpointConfig
from levelapp.core.base import BaseRepository, BaseEvaluator
from levelapp.aspects import DataLoader


class ExtendedEnum(Enum):
    @classmethod
    def list(cls):
        return [e.value for e in cls]


class WorkflowType(ExtendedEnum):
    SIMULATOR = "SIMULATOR"
    COMPARATOR = "COMPARATOR"
    ASSESSOR = "ASSESSOR"


class RepositoryType(ExtendedEnum):
    FIRESTORE = "FIRESTORE"
    FILESYSTEM = "FILESYSTEM"


class EvaluatorType(ExtendedEnum):
    JUDGE = "JUDGE"
    REFERENCE = "REFERENCE"
    RAG = "RAG"


class WorkflowConfig:
    """Configuration for a workflow, loaded from JSON/YAML via DataLoader."""

    # Class-level constant: fields that must be present in the configuration file.
    _fields_list: List[str] = [
        "project_name",
        "evaluation_params",
        "workflow",
        "repository",
        "evaluators",
        "reference_data",
    ]

    def __init__(
        self,
        workflow: WorkflowType,
        repository: RepositoryType,
        evaluators: List[EvaluatorType],
        endpoint_config: EndpointConfig,
        inputs: Dict[str, Any],
    ):
        self.workflow = workflow
        self.repository = repository
        self.evaluators = evaluators
        self.endpoint_config = endpoint_config
        self.inputs = inputs

    @classmethod
    def load(cls, path: str | None = None) -> "WorkflowConfig":
        """Load and validate workflow configuration from a file."""
        loader = DataLoader()
        config_dict = loader.load_raw_data(path=path)
        model_config: BaseModel = loader.create_dynamic_model(data=config_dict, model_name="WorkflowConfiguration")

        cls._check_fields(model_config)
        cls._check_values(model_config)

        workflow = WorkflowType(model_config.workflow)
        repository = RepositoryType(model_config.repository)

        if isinstance(model_config.evaluators, str):
            print(f"evaluators: {model_config.evaluators}")
            evaluators = [EvaluatorType(model_config.evaluators)]
        else:
            evaluators = [EvaluatorType(e) for e in model_config.evaluators]

        evaluation_params = model_config.evaluation_params.model_dump()
        reference_data_path = getattr(model_config.reference_data, "path", None)
        endpoint_config = EndpointConfig.model_validate(model_config.endpoint_configuration.model_dump())

        return cls(
            workflow=workflow,
            repository=repository,
            evaluators=evaluators,
            endpoint_config=endpoint_config,
            inputs={'reference_data_path': reference_data_path, 'evaluation_params': evaluation_params},
        )

    @classmethod
    def _check_fields(cls, config: BaseModel) -> None:
        for field_name in cls._fields_list:
            if field_name not in config.model_fields:
                raise ValueError(f"[WorkflowConfig] Field '{field_name}' missing in configuration")

    @staticmethod
    def _check_values(config: BaseModel) -> None:
        if config.workflow not in WorkflowType.list():
            raise ValueError(f"[WorkflowConfig] Unsupported workflow type '{config.workflow}'")
        if config.repository not in RepositoryType.list():
            raise ValueError(f"[WorkflowConfig] Unsupported repository type '{config.repository}'")

        evals = config.evaluators
        if isinstance(evals, str):
            evals = [evals]

        for e in evals:
            if e not in EvaluatorType.list():
                raise ValueError(f"[WorkflowConfig] Unsupported evaluator type '{e}'")


@dataclass(frozen=True)
class WorkflowContext:
    """Immutable data holder for workflow execution context."""
    config: WorkflowConfig
    repository: BaseRepository
    evaluators: Dict[str, BaseEvaluator]
    endpoint_config: EndpointConfig
    inputs: Dict[str, Any]
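
The `_check_values` validation above accepts exactly the values defined on these enums, which is what the `ExtendedEnum.list()` helper exposes. A tiny sketch of what it returns:

```python
# Sketch: the enum values accepted in workflow_config.yaml.
from levelapp.workflow.schemas import WorkflowType, RepositoryType, EvaluatorType

print(WorkflowType.list())    # ['SIMULATOR', 'COMPARATOR', 'ASSESSOR']
print(RepositoryType.list())  # ['FIRESTORE', 'FILESYSTEM']
print(EvaluatorType.list())   # ['JUDGE', 'REFERENCE', 'RAG']
```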

levelapp-0.1.0.dist-info/METADATA
@@ -0,0 +1,254 @@
Metadata-Version: 2.4
Name: levelapp
Version: 0.1.0
Summary: LevelApp is an evaluation framework for AI/LLM-based software applications. [Powered by Norma]
Project-URL: Homepage, https://github.com/levelapp-org
Project-URL: Repository, https://github.com/levelapp-org/levelapp-framework
Project-URL: Documentation, https://levelapp.readthedocs.io
Project-URL: Issues, https://github.com/levelapp-org/levelapp-framework/issues
Author-email: KadriSof <kadrisofyen@gmail.com>
License-File: LICENSE
Keywords: ai,evaluation,framework,llm,testing
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Software Development :: Testing
Requires-Python: >=3.12
Requires-Dist: arrow>=1.3.0
Requires-Dist: httpx>=0.28.1
Requires-Dist: numpy>=2.3.2
Requires-Dist: openai>=1.99.9
Requires-Dist: pandas-stubs==2.3.0.250703
Requires-Dist: pandas>=2.3.1
Requires-Dist: pydantic>=2.11.7
Requires-Dist: python-dotenv>=1.1.1
Requires-Dist: pyyaml>=6.0.2
Requires-Dist: rapid>=0.0.3
Requires-Dist: rapidfuzz>=3.13.0
Requires-Dist: requests>=2.32.4
Requires-Dist: tenacity>=9.1.2
Provides-Extra: dev
Requires-Dist: arrow>=1.3.0; extra == 'dev'
Requires-Dist: httpx>=0.28.1; extra == 'dev'
Requires-Dist: numpy>=2.3.2; extra == 'dev'
Requires-Dist: openai>=1.99.9; extra == 'dev'
Requires-Dist: pandas-stubs==2.3.0.250703; extra == 'dev'
Requires-Dist: pandas>=2.3.1; extra == 'dev'
Requires-Dist: pydantic>=2.11.7; extra == 'dev'
Requires-Dist: python-dotenv>=1.1.1; extra == 'dev'
Requires-Dist: pyyaml>=6.0.2; extra == 'dev'
Requires-Dist: rapid>=0.0.3; extra == 'dev'
Requires-Dist: rapidfuzz>=3.13.0; extra == 'dev'
Requires-Dist: requests>=2.32.4; extra == 'dev'
Requires-Dist: tenacity>=9.1.2; extra == 'dev'
Description-Content-Type: text/markdown

# LevelApp: AI/LLM Evaluation Framework for Regression Testing

[PyPI](https://badge.fury.io/py/levelapp) | [MIT License](https://opensource.org/licenses/MIT) | [Python 3.12+](https://www.python.org/downloads/)

## Overview

LevelApp is an evaluation framework designed for black-box regression testing of already-built LLM-based systems in production or testing phases. It focuses on assessing the performance and reliability of AI/LLM applications through its simulation and comparison modules. Powered by Norma.

Key benefits:
- Configuration-driven: minimal coding required; define evaluations via YAML files.
- Supports LLM-as-a-judge for qualitative assessments and quantitative metrics for metadata evaluation.
- Modular architecture for easy extension to new workflows, evaluators, and repositories.

## Features

- **Simulator Module**: Evaluates dialogue systems by simulating conversations from predefined scripts. It uses an LLM as a judge to score replies against references and supports metrics (e.g., Exact, Embedding, Token-based, Fuzzy) for comparing extracted metadata to ground truth.
- **Comparator Module**: Evaluates metadata extraction from JSON outputs (e.g., from legal/financial document processing with LLMs) by comparing against reference/ground-truth data.
- **Configuration-Based Workflow**: Users provide YAML configs for endpoints, parameters, data sources, and metrics, reducing the need for custom code.
- **Supported Workflows**: SIMULATOR, COMPARATOR, ASSESSOR (coming soon!).
- **Repositories**: FIRESTORE, FILESYSTEM, MONGODB.
- **Evaluators**: JUDGE, REFERENCE, RAG.
- **Metrics**: Exact, Levenshtein, and more (see docs for full list).
- **Data Sources**: Local or remote JSON for conversation scripts.

## Installation

Install LevelApp via pip:

```bash
pip install levelapp
```

### Prerequisites
- Python 3.12 or higher.
- API keys for LLM providers (e.g., OpenAI, Anthropic) if using external clients; store them in a `.env` file.
- Optional: Google Cloud credentials for the Firestore repository.
- Dependencies (`openai`, `pydantic`, `numpy`, etc.) are installed automatically; see `pyproject.toml` for the full list.

## Configuration

LevelApp uses a YAML configuration file to define the evaluation setup. Create a `workflow_config.yaml` with the following structure:

```yaml
project_name: "test-project"
evaluation_params:
  attempts: 1  # Number of simulation attempts.

workflow: SIMULATOR  # SIMULATOR, COMPARATOR, ASSESSOR.
repository: FIRESTORE  # FIRESTORE, FILESYSTEM, MONGODB.
evaluators:  # JUDGE, REFERENCE, RAG.
  - JUDGE
  - REFERENCE

endpoint_configuration:
  base_url: "http://127.0.0.1:8000"
  url_path: ''
  api_key: "<API-KEY>"
  bearer_token: "<BEARER-TOKEN>"
  model_id: "meta-llama/Meta-Llama-3.1-8B-Instruct"
  payload_path: "../../src/data/payload_example_1.yaml"
  default_request_payload_template:
    prompt: "${user_message}"
    details: "${request_payload}"  # Rest of the request payload data.
  default_response_payload_template:
    agent_reply: "${agent_reply}"
    guardrail_flag: "${guardrail_flag}"
    generated_metadata: "${generated_metadata}"

reference_data:
  source: LOCAL  # LOCAL or REMOTE.
  path: "../../src/data/conversation_example_1.json"

metrics_map:
  field_1: EXACT
  field_2: LEVENSHTEIN
```

- **Endpoint Configuration**: Define how to interact with your LLM-based system (base URL, auth, payload templates).
- **Placeholders**: For the request payload, rename the fields (e.g., `prompt` to `message`) to match your API specs. For the response payload, change the placeholder values (e.g., `${agent_reply}` to `${generated_reply}`).
- **Secrets**: Store API keys in `.env` and load them via `python-dotenv` (e.g., `API_KEY=your_key_here`), as sketched below.
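
A minimal sketch of that pattern (`API_KEY` is just an example variable name; how the value ends up in your `endpoint_configuration` depends on your setup):

```python
# .env file contains a line such as:  API_KEY=your_key_here
import os

from dotenv import load_dotenv  # python-dotenv is already a levelapp dependency

load_dotenv()                    # read variables from .env into the environment
api_key = os.getenv("API_KEY")   # fetch the key instead of hard-coding it in YAML
```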

For conversation scripts (used in the Simulator), provide a JSON file with this schema:

```json
{
  "id": "1fa6f6ed-3cfe-4c0b-b389-7292f58879d4",
  "scripts": [
    {
      "id": "65f58cec-d55d-4a24-bf16-fa8327a3aa6b",
      "interactions": [
        {
          "id": "e99a2898-6a79-4a20-ac85-dfe977ea9935",
          "user_message": "Hello, I would like to book an appointment with a doctor.",
          "reference_reply": "Sure, I can help with that. Could you please specify the type of doctor you need to see?",
          "interaction_type": "initial",
          "reference_metadata": {},
          "generated_metadata": {},
          "guardrail_flag": false,
          "request_payload": {"user_id": "0001", "user_role": "ADMIN"}
        },
        {
          "id": "fe5c539a-d0a1-40ee-97bd-dbe456703ccc",
          "user_message": "I need to see a cardiologist.",
          "reference_reply": "When would you like to schedule your appointment?",
          "interaction_type": "intermediate",
          "reference_metadata": {},
          "generated_metadata": {},
          "guardrail_flag": false,
          "request_payload": {"user_id": "0001", "user_role": "ADMIN"}
        },
        {
          "id": "2cfdbd1c-a065-48bb-9aa9-b958342154b1",
          "user_message": "I would like to book it for next Monday morning.",
          "reference_reply": "We have an available slot at 10 AM next Monday. Does that work for you?",
          "interaction_type": "intermediate",
          "reference_metadata": {
            "appointment_type": "Cardiology",
            "date": "next Monday",
            "time": "10 AM"
          },
          "generated_metadata": {
            "appointment_type": "Cardiology",
            "date": "next Monday",
            "time": "morning"
          },
          "guardrail_flag": false,
          "request_payload": {"user_id": "0001", "user_role": "ADMIN"}
        },
        {
          "id": "f4f2dd35-71d7-4b75-ba2b-93a4f546004a",
          "user_message": "Yes, please book it for 10 AM then.",
          "reference_reply": "Your appointment with the cardiologist is booked for 10 AM next Monday. Is there anything else I can help you with?",
          "interaction_type": "final",
          "reference_metadata": {},
          "generated_metadata": {},
          "guardrail_flag": false,
          "request_payload": {"user_id": "0001", "user_role": "ADMIN"}
        }
      ],
      "description": "A conversation about booking a doctor appointment.",
      "details": {
        "context": "Booking a doctor appointment"
      }
    }
  ]
}
```

- **Fields**: Each interaction includes the user message, a reference reply, reference and generated metadata for comparison, a guardrail flag, and the request payload.
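
To sanity-check a script file before launching a simulation, you can validate it against the same `ScriptsBatch` schema the Simulator workflow applies internally (a sketch; the file name is illustrative, and the packaged workflow loads files through `DataLoader` rather than plain `json`):

```python
# Sketch: pre-validate a conversation-scripts file against the ScriptsBatch model.
import json

from pydantic import ValidationError
from levelapp.simulator.schemas import ScriptsBatch

with open("conversation_example_1.json", encoding="utf-8") as f:
    data = json.load(f)

try:
    ScriptsBatch.model_validate(data)
    print("Reference data is valid.")
except ValidationError as exc:
    print(f"Reference data is invalid:\n{exc}")
```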

## Usage Example

To run an evaluation:

1. Prepare your YAML config and JSON data files.
2. Use the following Python script:

```python
if __name__ == "__main__":
    from levelapp.workflow.schemas import WorkflowConfig
    from levelapp.core.session import EvaluationSession

    # Load configuration from YAML
    config = WorkflowConfig.load(path="../data/workflow_config.yaml")

    # Run evaluation session
    with EvaluationSession(session_name="sim-test", workflow_config=config) as session:
        session.run()
        results = session.workflow.collect_results()
        print("Results:", results)

        stats = session.get_stats()
        print(f"session stats:\n{stats}")
```

- This loads the config, runs the specified workflow (e.g., Simulator), collects the results, and prints session stats.

For more examples, see the `examples/` directory.

## Documentation

Detailed docs are in the `docs/` directory, including API references and advanced configuration.

## Contributing

Contributions are welcome! Please follow these steps:
- Fork the repository on GitHub.
- Create a feature branch (`git checkout -b feature/new-feature`).
- Commit changes (`git commit -am 'Add new feature'`).
- Push to the branch (`git push origin feature/new-feature`).
- Open a pull request.

Report issues via GitHub Issues. Follow the code of conduct (if applicable).

## Acknowledgments

- Powered by Norma.
- Thanks to contributors and open-source libraries like Pydantic, NumPy, and the OpenAI SDK.

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

---

levelapp-0.1.0.dist-info/RECORD
@@ -0,0 +1,46 @@
levelapp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
levelapp/aspects/__init__.py,sha256=_OaPcjTWBizqcUdDVj5aYue7lG9ytjQGLhPvReriKnU,326
levelapp/aspects/loader.py,sha256=xWpcWtS25zbVhZ0UnIJEcQA9klajKk10TLK4j1IStH0,9543
levelapp/aspects/logger.py,sha256=MJ9HphyHYkTE5-ajA_WuMUTM0qQzd0WIP243vF-pj3M,1698
levelapp/aspects/monitor.py,sha256=lXGgxxeKZNkL5XbC_0ybES75gaRA9SuspHLHAEDbMR8,21981
levelapp/aspects/sanitizer.py,sha256=zUqgb76tXJ8UUYtHp0Rz7q9PZjAHpSpHPPFfGTjjQNg,5229
levelapp/clients/__init__.py,sha256=bbSFkYIODxEFatET4CFsKGC4m7Ng_tUkTGNcVBMa9h0,4169
levelapp/clients/anthropic.py,sha256=Bxp-HffcIPLwM9BLcTR7n-D8ZXYVWCmbr2oH33fKV04,4030
levelapp/clients/ionos.py,sha256=GFkLSeu8epFZV44GbNO3h1fRCKcfxscHMTFY0kPfh3o,4267
levelapp/clients/mistral.py,sha256=e1NRvP9qN7O2zWAzBbgdQmmUDHQfCRLtVKDJCrh0DNA,3777
levelapp/clients/openai.py,sha256=J9srwepIGMDasUyeUvL9ETxxyCyOyUsL01YOdEAyF8w,3656
levelapp/comparator/__init__.py,sha256=ynmc0mrx-JbcCqLH-z4hOVezqGocDbDQGqgbhWy2xzI,187
levelapp/comparator/comparator.py,sha256=yk0FWREnWKhIbXlsYpieqPJPqrlWXzyBMjVSznGqKY8,8183
levelapp/comparator/extractor.py,sha256=vJ9iEoWAtXo2r9r7X72uUQPKW3UZE9Kx3uIjCufEp9k,3910
levelapp/comparator/schemas.py,sha256=lUAQzEyStidt2ePQgV2zq-An5MLBrVSw6t8fB0FQKJs,1803
levelapp/comparator/scorer.py,sha256=x5tU28SQekCNXmtK6wxr9ht7MZkqI78eYmZA6srkp4o,9167
levelapp/comparator/utils.py,sha256=Eu48nDrNzFr0lwAJJS0aNhKsAWQ72syTEWYMNYfg764,4331
levelapp/config/__init__.py,sha256=9oaajE5zW-OVWOszUzMAG6nHDSbLQWa3KT6bVoSvzRA,137
levelapp/config/endpoint.py,sha256=ll34rZ0KRmUwI81EWJ3HX9i6pziq2YrQb84kv4ErymI,7649
levelapp/config/prompts.py,sha256=crjOk01weLz5_IdF6dDZWPfSmiKNL8SgnbThyf4Jz2o,1345
levelapp/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
levelapp/core/base.py,sha256=oh4OkKgwGxmw_jgjX6wrBoK0KPc1JvCMZfbZP_mGmIg,12453
levelapp/core/session.py,sha256=0pE8iUNyA6__RYGJlfaqSr0MtOT0vYIwr496KvC1VlA,7885
levelapp/evaluator/__init__.py,sha256=K-P75Q1FXXLCNqH1wyhT9sf4y2R9a1qR5449AXEsY1k,109
levelapp/evaluator/evaluator.py,sha256=JlqszyfOvA8JbgHmQNhMSBkminF95_AjXe14jg_OO7s,9908
levelapp/metrics/__init__.py,sha256=1y4gDLOu2Jz4QVIgPH-v9YMgaWOFr263tYLUTiFJ-fc,1965
levelapp/metrics/embedding.py,sha256=wvlT8Q5DjDT6GrAIFtc5aFbA_80hDLUXMP4RbSpSwHE,115
levelapp/metrics/exact.py,sha256=Kb13nD2OVLrl3iYHaXrxDfrxDuhW0SMVvLAEXPaJtlY,6235
levelapp/metrics/fuzzy.py,sha256=Rg8ashzMxtQwKO-z_LLzdj2PDIRqL4CBw6PGRf9IBrI,2598
levelapp/metrics/token.py,sha256=yQi9hxT_fXTGjLiCCemDxQ4Uk2zD-wQYtSnDlI2AuuY,3521
levelapp/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
levelapp/repository/__init__.py,sha256=hNmFRZ7kKJN1mMlOHeW9xf0j9Q7gqTXYJ3hMCzk9to4,79
levelapp/repository/firestore.py,sha256=6HuBCgcBBg-E8QHr06SVDXKY0QBhsWPOhmO7Ps3BWMs,10060
levelapp/simulator/__init__.py,sha256=8Dz8g7rbpBZX3WoknVmMVoWm_VT72ZL9BABOF1xFpqs,83
levelapp/simulator/schemas.py,sha256=YD6yu3UDXNoApX3R9VpCnpe3sWF7Gs8TNNUgsnqTSXg,4026
levelapp/simulator/simulator.py,sha256=GhoJYcl9GS9vfK58uLwPiI5_cFExFb_Xw_EjElesmvY,17120
levelapp/simulator/utils.py,sha256=qx0JdV1ZDQdTRVKa9xfq278ASrE44GBXSnJZJuhICqo,7365
levelapp/workflow/__init__.py,sha256=X73ulbq3Vk_Vsug1eU2m7LsBCmbR7ehQlwUVCh4Rbb0,142
levelapp/workflow/base.py,sha256=4LFrJyGnuSUJSZ1oOVI_uF1cwEpbYeFFZXHPSh3aEGo,4289
levelapp/workflow/factory.py,sha256=gW5wLv_kJfzrSTQeSLFoUunk87rw0NzW4wmJLIfcwX8,2228
levelapp/workflow/registration.py,sha256=33tPCyzP-XeI1vefjA3PmVdBl7x4wtsVGg05Bh_j7os,363
levelapp/workflow/schemas.py,sha256=Q4TmjqTz_tKzMPXD_fJ2hc7K3IVsn_U56BzHG94WwIA,4090
levelapp-0.1.0.dist-info/METADATA,sha256=6XNgxGHG8oNc-yjg82ZAaGi_UVvrnOalIpUmS5viaW8,10242
levelapp-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
levelapp-0.1.0.dist-info/licenses/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
levelapp-0.1.0.dist-info/RECORD,,