judgeval 0.0.21__tar.gz → 0.0.23__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {judgeval-0.0.21 → judgeval-0.0.23}/PKG-INFO +1 -1
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/custom_scorers.mdx +3 -3
- {judgeval-0.0.21 → judgeval-0.0.23}/pyproject.toml +1 -1
- judgeval-0.0.23/src/demo/cookbooks/JNPR_Mist/test.py +21 -0
- judgeval-0.0.23/src/demo/cookbooks/linkd/text2sql.py +14 -0
- judgeval-0.0.23/src/demo/custom_example_demo/qodo_example.py +39 -0
- judgeval-0.0.23/src/demo/custom_example_demo/test.py +16 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/__init__.py +2 -3
- judgeval-0.0.23/src/judgeval/data/custom_example.py +98 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/datasets/dataset.py +17 -124
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/datasets/eval_dataset_client.py +5 -11
- judgeval-0.0.23/src/judgeval/data/ground_truth.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/judgment_client.py +3 -4
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorer.py +2 -2
- judgeval-0.0.23/src/judgeval/scorers/judgeval_scorers/local_implementations/comparison/__init__.py +0 -0
- judgeval-0.0.21/src/demo/cookbooks/test.py +0 -152
- judgeval-0.0.21/src/judgeval/data/datasets/utils.py +0 -73
- judgeval-0.0.21/src/judgeval/data/ground_truth.py +0 -54
- {judgeval-0.0.21 → judgeval-0.0.23}/.github/workflows/ci.yaml +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/.gitignore +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/LICENSE.md +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/Pipfile +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/Pipfile.lock +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/README.md +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/README.md +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/api_reference/judgment_client.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/api_reference/trace.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/development.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/essentials/code.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/essentials/images.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/essentials/markdown.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/essentials/navigation.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/essentials/reusable-snippets.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/essentials/settings.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/data_datasets.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/data_examples.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/introduction.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/judges.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/answer_correctness.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/answer_relevancy.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/classifier_scorer.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/comparison.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/contextual_precision.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/contextual_recall.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/contextual_relevancy.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/execution_order.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/faithfulness.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/hallucination.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/introduction.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/json_correctness.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/summarization.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/unit_testing.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/favicon.svg +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/getting_started.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/images/basic_trace_example.png +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/images/checks-passed.png +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/images/create_aggressive_scorer.png +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/images/create_scorer.png +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/images/evaluation_diagram.png +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/images/hero-dark.svg +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/images/hero-light.svg +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/images/online_eval_fault.png +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/images/trace_ss.png +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/integration/langgraph.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/introduction.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/judgment/introduction.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/logo/dark.svg +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/logo/light.svg +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/mint.json +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/monitoring/introduction.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/monitoring/production_insights.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/monitoring/tracing.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/notebooks/create_dataset.ipynb +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/notebooks/create_scorer.ipynb +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/notebooks/demo.ipynb +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/notebooks/prompt_scorer.ipynb +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/notebooks/quickstart.ipynb +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/quickstart.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/docs/snippets/snippet-intro.mdx +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/pytest.ini +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/clients.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/common/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/common/exceptions.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/common/logger.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/common/tracer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/common/utils.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/constants.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/api_example.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/datasets/__init__.py +0 -0
- /judgeval-0.0.21/src/judgeval/scorers/judgeval_scorers/local_implementations/comparison/__init__.py → /judgeval-0.0.23/src/judgeval/data/datasets/utils.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/example.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/result.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/scorer_data.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/evaluation_run.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/judges/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/judges/base_judge.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/judges/litellm_judge.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/judges/mixture_of_judges.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/judges/together_judge.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/judges/utils.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/rules.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/run_evaluation.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/api_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/base_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/exceptions.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/answer_correctness.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/answer_relevancy.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/comparison.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/contextual_precision.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/contextual_recall.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/contextual_relevancy.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/execution_order.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/faithfulness.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/groundedness.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/hallucination.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/instruction_adherence.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/json_correctness.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/api_scorers/summarization.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/classifiers/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/classifiers/text2sql/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/classifiers/text2sql/text2sql_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/answer_correctness/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/answer_correctness/answer_correctness_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/answer_correctness/prompts.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/answer_relevancy/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/answer_relevancy/answer_relevancy_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/answer_relevancy/prompts.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/comparison/comparison_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/comparison/prompts.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/contextual_precision/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/contextual_precision/contextual_precision_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/contextual_precision/prompts.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/contextual_recall/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/contextual_recall/contextual_recall_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/contextual_recall/prompts.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/contextual_relevancy/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/contextual_relevancy/contextual_relevancy_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/contextual_relevancy/prompts.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/execution_order/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/execution_order/execution_order.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/faithfulness/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/faithfulness/faithfulness_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/faithfulness/prompts.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/hallucination/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/hallucination/hallucination_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/hallucination/prompts.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/instruction_adherence/instruction_adherence.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/instruction_adherence/prompt.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/json_correctness/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/json_correctness/json_correctness_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/summarization/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/summarization/prompts.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorers/local_implementations/summarization/summarization_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/prompt_scorer.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/score.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/utils.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/tracer/__init__.py +0 -0
- {judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/utils/alerts.py +0 -0
{judgeval-0.0.21 → judgeval-0.0.23}/docs/evaluation/scorers/custom_scorers.mdx

@@ -116,9 +116,9 @@ class SampleScorer(JudgevalScorer):
 ```


-### 4. Implement the `
+### 4. Implement the `_success_check()` method

-When executing an evaluation run, `judgeval` will check if your scorer has passed the `
+When executing an evaluation run, `judgeval` will check if your scorer has passed the `_success_check()` method.

 You can implement this method in any way you want, but **it should return a `bool`.** Here's a perfectly valid implementation:

@@ -126,7 +126,7 @@ You can implement this method in any way you want, but **it should return a `bool`.**
 class SampleScorer(JudgevalScorer):
     ...

-    def 
+    def _success_check(self):
         if self.error is not None:
             return False
         return self.score >= self.threshold  # or you can do self.success if set
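For orientation, here is a minimal sketch of a complete custom scorer built around the renamed `_success_check()` hook. The `LengthScorer` class, its constructor arguments, and every method name other than `_success_check()` are illustrative assumptions rather than code from the package; only the `_success_check()` pattern mirrors the documentation excerpt above.

```python
from judgeval.scorers import JudgevalScorer


class LengthScorer(JudgevalScorer):
    """Hypothetical scorer: passes when the model output fits a character budget."""

    def __init__(self, threshold: float = 0.5, max_chars: int = 280, **kwargs):
        # Assumed constructor call; check JudgevalScorer.__init__ for the real signature.
        super().__init__(score_type="Length", threshold=threshold, **kwargs)
        self.max_chars = max_chars

    def score_example(self, example, *args, **kwargs):
        # Method name assumed from typical judgeval scorer implementations.
        # Toy scoring rule: 1.0 if the output fits the budget, otherwise 0.0.
        self.score = 1.0 if len(example.actual_output) <= self.max_chars else 0.0
        return self.score

    def _success_check(self) -> bool:
        # Same shape as the documented implementation: fail on error, else compare to threshold.
        if self.error is not None:
            return False
        return self.score >= self.threshold
```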
judgeval-0.0.23/src/demo/cookbooks/JNPR_Mist/test.py
ADDED

@@ -0,0 +1,21 @@
+from judgeval import JudgmentClient
+from judgeval.data import Example
+from judgeval.scorers import FaithfulnessScorer
+
+client = JudgmentClient()
+
+example = Example(
+    input="What if these shoes don't fit?",
+    actual_output="We offer a 30-day full refund at no extra cost.",
+    retrieval_context=["All customers are eligible for a 30 day full refund at no extra cost."],
+)
+
+scorer = FaithfulnessScorer(threshold=0.5)
+results = client.run_evaluation(
+    examples=[example],
+    scorers=[scorer],
+    model="gpt-4o",
+    eval_run_name="TestRun",
+    project_name="TestProject",
+)
+print(results)
judgeval-0.0.23/src/demo/cookbooks/linkd/text2sql.py
ADDED

@@ -0,0 +1,14 @@
+"""
+ClassifierScorer implementation for basic Text-to-SQL evaluation.
+
+Takes a natural language query, a corresponding LLM-generated SQL query, and a table schema + (optional) metadata.
+Determines if the LLM-generated SQL query is valid and works for the natural language query.
+"""
+from judgeval.scorers import ClassifierScorer
+from judgeval import JudgmentClient
+from judgeval.scorers.judgeval_scorers.classifiers.text2sql.text2sql_scorer import Text2SQLScorer
+
+judgment_client = JudgmentClient()
+
+print(judgment_client.push_classifier_scorer(Text2SQLScorer, slug="text2sql-eric-linkd"))
+print(judgment_client.fetch_classifier_scorer("text2sql-eric-linkd"))
judgeval-0.0.23/src/demo/custom_example_demo/qodo_example.py
ADDED

@@ -0,0 +1,39 @@
+from judgeval.data import CustomExample
+from pydantic import field_validator
+
+class QodoExample(CustomExample):
+    code: str
+    original_code: str
+
+    def __init__(self, **data):
+        super().__init__(**data)
+
+    @field_validator('code', 'original_code', mode='before')
+    @classmethod
+    def validate_code(cls, v):
+        if v is not None and not isinstance(v, str):
+            raise ValueError(f"Code must be a string or None but got {v} of type {type(v)}")
+        return v
+
+    def to_dict(self):
+        return {
+            "code": self.code,
+            "original_code": self.original_code,
+            **super().to_dict()
+        }
+
+    def model_dump(self, **kwargs):
+        """
+        Custom serialization that handles special cases for fields that might fail standard serialization.
+        """
+        data = super().model_dump(**kwargs)
+
+        # Do any additional serialization here
+        data["code"] = self.code
+        data["original_code"] = self.original_code
+
+        return data
+
+
+
+
judgeval-0.0.23/src/demo/custom_example_demo/test.py
ADDED

@@ -0,0 +1,16 @@
+from judgeval.data import CustomExample
+from judgeval import JudgmentClient
+from qodo_example import QodoExample
+
+custom_example = CustomExample(
+    code="print('Hello, world!')",
+    original_code="print('Hello, world!')",
+)
+
+qodo_example = QodoExample(
+    code="print('Hello, world!')",
+    original_code="print('Hello, world!')",
+)
+
+print(f"{custom_example=}")
+print(f"{qodo_example=}")
{judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/__init__.py

@@ -2,8 +2,7 @@ from judgeval.data.example import Example, ExampleParams
 from judgeval.data.api_example import ProcessExample, create_process_example
 from judgeval.data.scorer_data import ScorerData, create_scorer_data
 from judgeval.data.result import ScoringResult, generate_scoring_result
-from judgeval.data.
-
+from judgeval.data.custom_example import CustomExample
 __all__ = [
     "Example",
     "ExampleParams",
@@ -13,5 +12,5 @@ __all__ = [
     "create_scorer_data",
     "ScoringResult",
     "generate_scoring_result",
-    "
+    "CustomExample",
 ]
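In effect, the public surface of `judgeval.data` swaps `GroundTruthExample` out for `CustomExample`. A minimal sanity check of the 0.0.23 import surface (assuming no other exports changed) would be:

```python
# GroundTruthExample is no longer importable from judgeval.data in 0.0.23.
from judgeval.data import Example, CustomExample, ScoringResult

print(Example.__name__, CustomExample.__name__, ScoringResult.__name__)
```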
judgeval-0.0.23/src/judgeval/data/custom_example.py
ADDED

@@ -0,0 +1,98 @@
+from pydantic import BaseModel, Field, field_validator
+from typing import Optional, Dict, Any
+from uuid import uuid4
+from datetime import datetime
+import json
+import warnings
+
+# Brainstorming what are the requirements for the fields?
+class CustomExample(BaseModel):
+    name: Optional[str] = None
+    additional_metadata: Optional[Dict[str, Any]] = None
+    example_id: str = Field(default_factory=lambda: str(uuid4()))
+    example_index: Optional[int] = None
+    timestamp: Optional[str] = None
+    trace_id: Optional[str] = None
+
+    model_config = {
+        "extra": "allow",  # Allow extra fields with any types
+    }
+
+    def __init__(self, **data):
+        if 'example_id' not in data:
+            data['example_id'] = str(uuid4())
+        # Set timestamp if not provided
+        if 'timestamp' not in data:
+            data['timestamp'] = datetime.now().isoformat()
+        super().__init__(**data)
+
+    @field_validator('additional_metadata', mode='before')
+    @classmethod
+    def validate_additional_metadata(cls, v):
+        if v is not None and not isinstance(v, dict):
+            raise ValueError(f"Additional metadata must be a dictionary or None but got {v} of type {type(v)}")
+        return v
+
+    @field_validator('example_index', mode='before')
+    @classmethod
+    def validate_example_index(cls, v):
+        if v is not None and not isinstance(v, int):
+            raise ValueError(f"Example index must be an integer or None but got {v} of type {type(v)}")
+        return v
+
+    @field_validator('timestamp', mode='before')
+    @classmethod
+    def validate_timestamp(cls, v):
+        if v is not None and not isinstance(v, str):
+            raise ValueError(f"Timestamp must be a string or None but got {v} of type {type(v)}")
+        return v
+
+    @field_validator('trace_id', mode='before')
+    @classmethod
+    def validate_trace_id(cls, v):
+        if v is not None and not isinstance(v, str):
+            raise ValueError(f"Trace ID must be a string or None but got {v} of type {type(v)}")
+        return v
+
+    def to_dict(self):
+        return self.model_dump()
+
+    def __str__(self):
+        return str(self.model_dump())
+
+    def model_dump(self, **kwargs):
+        """
+        Custom serialization that handles special cases for fields that might fail standard serialization.
+        """
+        data = super().model_dump(**kwargs)
+
+        # Get all fields including custom ones
+        all_fields = self.__dict__
+
+        for field_name, value in all_fields.items():
+            try:
+                # Check if the field has its own serialization method
+                if hasattr(value, 'to_dict'):
+                    data[field_name] = value.to_dict()
+                elif hasattr(value, 'model_dump'):
+                    data[field_name] = value.model_dump()
+                # Field is already in data from super().model_dump()
+                elif field_name in data:
+                    continue
+                else:
+                    # Try standard JSON serialization
+                    json.dumps(value)
+                    data[field_name] = value
+            except (TypeError, OverflowError, ValueError):
+                # Handle non-serializable objects
+                try:
+                    # Try converting to string
+                    data[field_name] = str(value)
+                except Exception as _:
+                    # If all else fails, store as None and optionally warn
+                    warnings.warn(f"Could not serialize field {field_name}, setting to None")
+                    data[field_name] = None
+
+        return data
+
+
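Based on the class above, here is a quick sketch of how the permissive `extra="allow"` config and the auto-filled fields behave; the `query` and `dialect` values are invented for illustration and are not part of the package.

```python
from judgeval.data import CustomExample

ex = CustomExample(
    name="sql-eval-1",
    additional_metadata={"dialect": "postgres"},  # must be a dict or None per the validator
    query="SELECT * FROM users;",                 # arbitrary extra field, accepted via extra="allow"
)
print(ex.example_id)   # UUID generated in __init__ when not supplied
print(ex.timestamp)    # ISO timestamp filled in when not supplied
print(ex.to_dict())    # serialization includes the extra "query" field
```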
{judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/datasets/dataset.py

@@ -7,12 +7,11 @@ import yaml
 from dataclasses import dataclass, field
 from typing import List, Union, Literal

-from judgeval.data import Example
+from judgeval.data import Example
 from judgeval.common.logger import debug, error, warning, info

 @dataclass
 class EvalDataset:
-    ground_truths: List[GroundTruthExample]
     examples: List[Example]
     _alias: Union[str, None] = field(default=None)
     _id: Union[str, None] = field(default=None)
@@ -21,13 +20,11 @@ class EvalDataset:
     def __init__(self,
                  judgment_api_key: str = os.getenv("JUDGMENT_API_KEY"),
                  organization_id: str = os.getenv("JUDGMENT_ORG_ID"),
-                 ground_truths: List[GroundTruthExample] = [],
                  examples: List[Example] = [],
                  ):
-        debug(f"Initializing EvalDataset with {len(
+        debug(f"Initializing EvalDataset with {len(examples)} examples")
         if not judgment_api_key:
             warning("No judgment_api_key provided")
-        self.ground_truths = ground_truths
         self.examples = examples
         self._alias = None
         self._id = None
@@ -37,38 +34,13 @@ class EvalDataset:
     def add_from_json(self, file_path: str) -> None:
         debug(f"Loading dataset from JSON file: {file_path}")
         """
-        Adds examples
+        Adds examples from a JSON file.

-        The format of the JSON file is expected to be a dictionary with
-        The value of
+        The format of the JSON file is expected to be a dictionary with one key: "examples".
+        The value of the key is a list of dictionaries, where each dictionary represents an example.

         The JSON file is expected to have the following format:
         {
-            "ground_truths": [
-                {
-                    "input": "test input",
-                    "actual_output": null,
-                    "expected_output": "expected output",
-                    "context": [
-                        "context1"
-                    ],
-                    "retrieval_context": [
-                        "retrieval1"
-                    ],
-                    "additional_metadata": {
-                        "key": "value"
-                    },
-                    "comments": "test comment",
-                    "tools_called": [
-                        "tool1"
-                    ],
-                    "expected_tools": [
-                        "tool1"
-                    ],
-                    "source_file": "test.py",
-                    "trace_id": "094121"
-                }
-            ],
             "examples": [
                 {
                     "input": "test input",
@@ -103,7 +75,6 @@ class EvalDataset:
             with open(file_path, "r") as file:
                 payload = json.load(file)
                 examples = payload.get("examples", [])
-                ground_truths = payload.get("ground_truths", [])
         except FileNotFoundError:
             error(f"JSON file not found: {file_path}")
             raise FileNotFoundError(f"The file {file_path} was not found.")
@@ -111,21 +82,17 @@ class EvalDataset:
             error(f"Invalid JSON file: {file_path}")
             raise ValueError(f"The file {file_path} is not a valid JSON file.")

-        info(f"Added {len(examples)} examples
+        info(f"Added {len(examples)} examples from JSON")
         new_examples = [Example(**e) for e in examples]
         for e in new_examples:
             self.add_example(e)
-
-        new_ground_truths = [GroundTruthExample(**g) for g in ground_truths]
-        for g in new_ground_truths:
-            self.add_ground_truth(g)

     def add_from_csv(
         self,
         file_path: str,
     ) -> None:
         """
-        Add Examples
+        Add Examples from a CSV file.
         """
         try:
             import pandas as pd
@@ -144,14 +111,14 @@ class EvalDataset:
         "expected_tools", "name", "comments", "source_file", "example", \
         "trace_id"

-        We want to collect the examples
+        We want to collect the examples separately which can
         be determined by the "example" column. If the value is True, then it is an
-        example
+        example

         We also assume that if there are multiple retrieval contexts or contexts, they are separated by semicolons.
         This can be adjusted using the `context_delimiter` and `retrieval_context_delimiter` parameters.
         """
-        examples
+        examples = []

         for _, row in df.iterrows():
             data = {
@@ -174,49 +141,20 @@ class EvalDataset:
                 examples.append(e)
             else:
                 raise ValueError("Every example must have an 'input' and 'actual_output' field.")
-
-            # GroundTruthExample has `comments` and `source_file` fields
-            data["comments"] = row["comments"] if pd.notna(row["comments"]) else None
-            data["source_file"] = row["source_file"] if pd.notna(row["source_file"]) else None
-            # every GroundTruthExample has `input` field
-            if data["input"] is not None:
-                g = GroundTruthExample(**data)
-                ground_truths.append(g)
-            else:
-                raise ValueError("Every ground truth must have an 'input' field.")
+

         for e in examples:
             self.add_example(e)

-        for g in ground_truths:
-            self.add_ground_truth(g)
-
     def add_from_yaml(self, file_path: str) -> None:
         debug(f"Loading dataset from YAML file: {file_path}")
         """
-        Adds examples
+        Adds examples from a YAML file.

-        The format of the YAML file is expected to be a dictionary with
-        The value of
+        The format of the YAML file is expected to be a dictionary with one key: "examples".
+        The value of the key is a list of dictionaries, where each dictionary represents an example.

         The YAML file is expected to have the following format:
-        ground_truths:
-        - input: "test input"
-          actual_output: null
-          expected_output: "expected output"
-          context:
-            - "context1"
-          retrieval_context:
-            - "retrieval1"
-          additional_metadata:
-            key: "value"
-          comments: "test comment"
-          tools_called:
-            - "tool1"
-          expected_tools:
-            - "tool1"
-          source_file: "test.py"
-          trace_id: "094121"
         examples:
         - input: "test input"
           actual_output: "test output"
@@ -244,7 +182,6 @@ class EvalDataset:
             if payload is None:
                 raise ValueError("The YAML file is empty.")
             examples = payload.get("examples", [])
-            ground_truths = payload.get("ground_truths", [])
         except FileNotFoundError:
             error(f"YAML file not found: {file_path}")
             raise FileNotFoundError(f"The file {file_path} was not found.")
@@ -252,25 +189,18 @@ class EvalDataset:
             error(f"Invalid YAML file: {file_path}")
             raise ValueError(f"The file {file_path} is not a valid YAML file.")

-        info(f"Added {len(examples)} examples
+        info(f"Added {len(examples)} examples from YAML")
         new_examples = [Example(**e) for e in examples]
         for e in new_examples:
             self.add_example(e)

-        new_ground_truths = [GroundTruthExample(**g) for g in ground_truths]
-        for g in new_ground_truths:
-            self.add_ground_truth(g)
-
     def add_example(self, e: Example) -> None:
         self.examples = self.examples + [e]
         # TODO if we need to add rank, then we need to do it here
-
-    def add_ground_truth(self, g: GroundTruthExample) -> None:
-        self.ground_truths = self.ground_truths + [g]

     def save_as(self, file_type: Literal["json", "csv", "yaml"], dir_path: str, save_name: str = None) -> None:
         """
-        Saves the dataset as a file. Save
+        Saves the dataset as a file. Save only the examples.

         Args:
             file_type (Literal["json", "csv"]): The file type to save the dataset as.
@@ -285,7 +215,6 @@ class EvalDataset:
             with open(complete_path, "w") as file:
                 json.dump(
                     {
-                        "ground_truths": [g.to_dict() for g in self.ground_truths],
                         "examples": [e.to_dict() for e in self.examples],
                     },
                     file,
@@ -319,24 +248,7 @@ class EvalDataset:
                     ]
                 )

-
-                writer.writerow(
-                    [
-                        g.input,
-                        g.actual_output,
-                        g.expected_output,
-                        ";".join(g.context),
-                        ";".join(g.retrieval_context),
-                        g.additional_metadata,
-                        ";".join(g.tools_called),
-                        ";".join(g.expected_tools),
-                        None,  # GroundTruthExample does not have name
-                        g.comments,
-                        g.source_file,
-                        False,  # Adding a GroundTruthExample, not an Example
-                        g.trace_id
-                    ]
-                )
+
         elif file_type == "yaml":
             with open(complete_path, "w") as file:
                 yaml_data = {
@@ -358,24 +270,6 @@ class EvalDataset:
                     }
                     for e in self.examples
                 ],
-                "ground_truths": [
-                    {
-                        "input": g.input,
-                        "actual_output": g.actual_output,
-                        "expected_output": g.expected_output,
-                        "context": g.context,
-                        "retrieval_context": g.retrieval_context,
-                        "additional_metadata": g.additional_metadata,
-                        "tools_called": g.tools_called,
-                        "expected_tools": g.expected_tools,
-                        "name": None,  # GroundTruthExample does not have name
-                        "comments": g.comments,
-                        "source_file": g.source_file,
-                        "example": False,  # Adding a GroundTruthExample, not an Example
-                        "trace_id": g.trace_id
-                    }
-                    for g in self.ground_truths
-                ]
                 }
                 yaml.dump(yaml_data, file, default_flow_style=False)
             else:
@@ -391,7 +285,6 @@ class EvalDataset:
     def __str__(self):
         return (
             f"{self.__class__.__name__}("
-            f"ground_truths={self.ground_truths}, "
             f"examples={self.examples}, "
             f"_alias={self._alias}, "
             f"_id={self._id}"
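The upshot of these dataset changes is that an `EvalDataset` file now carries only examples. Below is a rough sketch of the new round trip, under the assumption that `Example` accepts at least `input` and `actual_output` (as in the docstring above); the file name is arbitrary.

```python
import json

from judgeval.data import Example
from judgeval.data.datasets import EvalDataset

# New-format dataset file: a single "examples" key, no "ground_truths".
with open("dataset.json", "w") as f:
    json.dump({"examples": [{"input": "test input", "actual_output": "test output"}]}, f)

dataset = EvalDataset(examples=[Example(input="seed", actual_output="seed output")])
dataset.add_from_json("dataset.json")  # appends to dataset.examples only
print(dataset)  # __str__ no longer mentions ground_truths
```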
{judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/data/datasets/eval_dataset_client.py

@@ -11,7 +11,7 @@ from judgeval.constants (
     JUDGMENT_DATASETS_EDIT_API_URL,
     JUDGMENT_DATASETS_EXPORT_JSONL_API_URL
 )
-from judgeval.data import Example
+from judgeval.data import Example
 from judgeval.data.datasets import EvalDataset


@@ -35,7 +35,6 @@ class EvalDatasetClient:
         Mock request:
         dataset = {
             "alias": alias,
-            "ground_truths": [...],
             "examples": [...],
             "overwrite": overwrite
         } ==>
@@ -55,7 +54,6 @@ class EvalDatasetClient:
         )
         content = {
             "alias": alias,
-            "ground_truths": [g.to_dict() for g in dataset.ground_truths],
             "examples": [e.to_dict() for e in dataset.examples],
             "overwrite": overwrite,
         }
@@ -102,7 +100,6 @@ class EvalDatasetClient:
         }
         ==>
         {
-            "ground_truths": [...],
             "examples": [...],
             "_alias": alias,
             "_id": "..."  # ID of the dataset
@@ -142,7 +139,6 @@ class EvalDatasetClient:

         info(f"Successfully pulled dataset with alias '{alias}'")
         payload = response.json()
-        dataset.ground_truths = [GroundTruthExample(**g) for g in payload.get("ground_truths", [])]
         dataset.examples = [Example(**e) for e in payload.get("examples", [])]
         dataset._alias = payload.get("_alias")
         dataset._id = payload.get("_id")
@@ -164,8 +160,8 @@ class EvalDatasetClient:
         }
         ==>
         {
-            "test_dataset_1": {"examples_count": len(dataset1.examples)
-            "test_dataset_2": {"examples_count": len(dataset2.examples)
+            "test_dataset_1": {"examples_count": len(dataset1.examples)},
+            "test_dataset_2": {"examples_count": len(dataset2.examples)},
             ...
         }
         """
@@ -209,15 +205,14 @@ class EvalDatasetClient:

         return payload

-    def edit_dataset(self, alias: str, examples: List[Example]
+    def edit_dataset(self, alias: str, examples: List[Example]) -> bool:
         """
-        Edits the dataset on Judgment platform by adding new examples
+        Edits the dataset on Judgment platform by adding new examples

         Mock request:
         {
             "alias": alias,
             "examples": [...],
-            "ground_truths": [...],
             "judgment_api_key": self.judgment_api_key
         }
         """
@@ -234,7 +229,6 @@ class EvalDatasetClient:
         content = {
             "alias": alias,
             "examples": [e.to_dict() for e in examples],
-            "ground_truths": [g.to_dict() for g in ground_truths],
         }

         try:
judgeval-0.0.23/src/judgeval/data/ground_truth.py
ADDED
File without changes
{judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/judgment_client.py

@@ -10,7 +10,6 @@ from judgeval.data.datasets import EvalDataset, EvalDatasetClient
 from judgeval.data import (
     ScoringResult,
     Example,
-    GroundTruthExample
 )
 from judgeval.scorers import (
     APIJudgmentScorer,
@@ -283,11 +282,11 @@ class JudgmentClient:
         """
         return self.eval_dataset_client.pull_all_user_dataset_stats()

-    def edit_dataset(self, alias: str, examples: List[Example]
+    def edit_dataset(self, alias: str, examples: List[Example]) -> bool:
         """
-        Edits the dataset on Judgment platform by adding new examples
+        Edits the dataset on Judgment platform by adding new examples
         """
-        return self.eval_dataset_client.edit_dataset(alias, examples
+        return self.eval_dataset_client.edit_dataset(alias, examples)

     # Maybe add option where you can pass in the EvaluationRun object and it will pull the eval results from the backend
     def pull_eval(self, project_name: str, eval_run_name: str) -> List[Dict[str, Union[str, List[ScoringResult]]]]:
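For reference, a short sketch of calling the narrowed `edit_dataset` after this change; the alias and example contents are placeholders, not values from the package.

```python
from judgeval import JudgmentClient
from judgeval.data import Example

client = JudgmentClient()
new_examples = [
    Example(input="What is the return policy?", actual_output="30 days, no questions asked."),
]
# ground_truths can no longer be passed; only examples are appended to the dataset.
ok = client.edit_dataset(alias="my-dataset", examples=new_examples)
print(ok)
```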
{judgeval-0.0.21 → judgeval-0.0.23}/src/judgeval/scorers/judgeval_scorer.py

@@ -116,8 +116,8 @@ class JudgevalScorer:
         For unit testing, determines whether the test case passes or fails
         """
         warning("Attempting to call unimplemented success_check method")
-        error("
-        raise NotImplementedError("You must implement the `
+        error("_success_check method not implemented")
+        raise NotImplementedError("You must implement the `_success_check` method in your custom scorer")

     def __str__(self):
         debug("Converting JudgevalScorer instance to string representation")
judgeval-0.0.23/src/judgeval/scorers/judgeval_scorers/local_implementations/comparison/__init__.py
ADDED
File without changes