judgeval 0.7.1__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (96)
  1. judgeval/__init__.py +139 -12
  2. judgeval/api/__init__.py +501 -0
  3. judgeval/api/api_types.py +344 -0
  4. judgeval/cli.py +2 -4
  5. judgeval/constants.py +10 -26
  6. judgeval/data/evaluation_run.py +49 -26
  7. judgeval/data/example.py +2 -2
  8. judgeval/data/judgment_types.py +266 -82
  9. judgeval/data/result.py +4 -5
  10. judgeval/data/scorer_data.py +4 -2
  11. judgeval/data/tool.py +2 -2
  12. judgeval/data/trace.py +7 -50
  13. judgeval/data/trace_run.py +7 -4
  14. judgeval/{dataset.py → dataset/__init__.py} +43 -28
  15. judgeval/env.py +67 -0
  16. judgeval/{run_evaluation.py → evaluation/__init__.py} +29 -95
  17. judgeval/exceptions.py +27 -0
  18. judgeval/integrations/langgraph/__init__.py +788 -0
  19. judgeval/judges/__init__.py +2 -2
  20. judgeval/judges/litellm_judge.py +75 -15
  21. judgeval/judges/together_judge.py +86 -18
  22. judgeval/judges/utils.py +7 -21
  23. judgeval/{common/logger.py → logger.py} +8 -6
  24. judgeval/scorers/__init__.py +0 -4
  25. judgeval/scorers/agent_scorer.py +3 -7
  26. judgeval/scorers/api_scorer.py +8 -13
  27. judgeval/scorers/base_scorer.py +52 -32
  28. judgeval/scorers/example_scorer.py +1 -3
  29. judgeval/scorers/judgeval_scorers/api_scorers/__init__.py +0 -14
  30. judgeval/scorers/judgeval_scorers/api_scorers/prompt_scorer.py +45 -20
  31. judgeval/scorers/judgeval_scorers/api_scorers/tool_dependency.py +2 -2
  32. judgeval/scorers/judgeval_scorers/api_scorers/tool_order.py +3 -3
  33. judgeval/scorers/score.py +21 -31
  34. judgeval/scorers/trace_api_scorer.py +5 -0
  35. judgeval/scorers/utils.py +1 -103
  36. judgeval/tracer/__init__.py +1075 -2
  37. judgeval/tracer/constants.py +1 -0
  38. judgeval/tracer/exporters/__init__.py +37 -0
  39. judgeval/tracer/exporters/s3.py +119 -0
  40. judgeval/tracer/exporters/store.py +43 -0
  41. judgeval/tracer/exporters/utils.py +32 -0
  42. judgeval/tracer/keys.py +67 -0
  43. judgeval/tracer/llm/__init__.py +1233 -0
  44. judgeval/{common/tracer → tracer/llm}/providers.py +5 -10
  45. judgeval/{local_eval_queue.py → tracer/local_eval_queue.py} +15 -10
  46. judgeval/tracer/managers.py +188 -0
  47. judgeval/tracer/processors/__init__.py +181 -0
  48. judgeval/tracer/utils.py +20 -0
  49. judgeval/trainer/__init__.py +5 -0
  50. judgeval/{common/trainer → trainer}/config.py +12 -9
  51. judgeval/{common/trainer → trainer}/console.py +2 -9
  52. judgeval/{common/trainer → trainer}/trainable_model.py +12 -7
  53. judgeval/{common/trainer → trainer}/trainer.py +119 -17
  54. judgeval/utils/async_utils.py +2 -3
  55. judgeval/utils/decorators.py +24 -0
  56. judgeval/utils/file_utils.py +37 -4
  57. judgeval/utils/guards.py +32 -0
  58. judgeval/utils/meta.py +14 -0
  59. judgeval/{common/api/json_encoder.py → utils/serialize.py} +7 -1
  60. judgeval/utils/testing.py +88 -0
  61. judgeval/utils/url.py +10 -0
  62. judgeval/{version_check.py → utils/version_check.py} +3 -3
  63. judgeval/version.py +5 -0
  64. judgeval/warnings.py +4 -0
  65. {judgeval-0.7.1.dist-info → judgeval-0.9.0.dist-info}/METADATA +12 -14
  66. judgeval-0.9.0.dist-info/RECORD +80 -0
  67. judgeval/clients.py +0 -35
  68. judgeval/common/__init__.py +0 -13
  69. judgeval/common/api/__init__.py +0 -3
  70. judgeval/common/api/api.py +0 -375
  71. judgeval/common/api/constants.py +0 -186
  72. judgeval/common/exceptions.py +0 -27
  73. judgeval/common/storage/__init__.py +0 -6
  74. judgeval/common/storage/s3_storage.py +0 -97
  75. judgeval/common/tracer/__init__.py +0 -31
  76. judgeval/common/tracer/constants.py +0 -22
  77. judgeval/common/tracer/core.py +0 -2427
  78. judgeval/common/tracer/otel_exporter.py +0 -108
  79. judgeval/common/tracer/otel_span_processor.py +0 -188
  80. judgeval/common/tracer/span_processor.py +0 -37
  81. judgeval/common/tracer/span_transformer.py +0 -207
  82. judgeval/common/tracer/trace_manager.py +0 -101
  83. judgeval/common/trainer/__init__.py +0 -5
  84. judgeval/common/utils.py +0 -948
  85. judgeval/integrations/langgraph.py +0 -844
  86. judgeval/judges/mixture_of_judges.py +0 -287
  87. judgeval/judgment_client.py +0 -267
  88. judgeval/rules.py +0 -521
  89. judgeval/scorers/judgeval_scorers/api_scorers/execution_order.py +0 -52
  90. judgeval/scorers/judgeval_scorers/api_scorers/hallucination.py +0 -28
  91. judgeval/utils/alerts.py +0 -93
  92. judgeval/utils/requests.py +0 -50
  93. judgeval-0.7.1.dist-info/RECORD +0 -82
  94. {judgeval-0.7.1.dist-info → judgeval-0.9.0.dist-info}/WHEEL +0 -0
  95. {judgeval-0.7.1.dist-info → judgeval-0.9.0.dist-info}/entry_points.txt +0 -0
  96. {judgeval-0.7.1.dist-info → judgeval-0.9.0.dist-info}/licenses/LICENSE.md +0 -0
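Several of the entries above are renames rather than new files. Read mechanically, they imply the module moves sketched below; this mapping is derived only from the paths in this list, and whether individual symbols survive each move is not shown in this diff.

    # Module moves implied by the file renames above (old dotted path -> new dotted path).
    # Derived mechanically from this diff; symbol-level compatibility is not verified here.
    MODULE_MOVES = {
        "judgeval.common.logger": "judgeval.logger",
        "judgeval.run_evaluation": "judgeval.evaluation",
        "judgeval.local_eval_queue": "judgeval.tracer.local_eval_queue",
        "judgeval.common.tracer.providers": "judgeval.tracer.llm.providers",
        "judgeval.common.trainer.trainer": "judgeval.trainer.trainer",
        "judgeval.common.api.json_encoder": "judgeval.utils.serialize",
        "judgeval.version_check": "judgeval.utils.version_check",
        "judgeval.dataset": "judgeval.dataset",  # dataset.py becomes dataset/__init__.py; dotted path unchanged
    }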
judgeval/judges/mixture_of_judges.py
@@ -1,287 +0,0 @@
-"""
-Implementation for Mixture of Judges model through Judgeval
-
-Enables client to use multiple models to generate responses and then aggregate them into a single response.
-"""
-
-import pydantic
-from typing import List, Union
-from judgeval.judges import JudgevalJudge
-from judgeval.common.utils import (
-    get_completion_multiple_models,
-    get_chat_completion,
-    aget_completion_multiple_models,
-    aget_chat_completion,
-)
-from judgeval.common.logger import judgeval_logger
-from judgeval.constants import DEFAULT_GPT_MODEL
-
-
-def build_dynamic_mixture_prompt(
-    judge_responses: List[str],
-    custom_system_prompt: Union[str, None] = None,
-    custom_conversation_history: Union[List[dict], None] = None,
-) -> List[dict]:
-    """
-    Dynamically builds a prompt to mix judge responses together for the Mixture of Judges model.
-
-    In this implementation, we simply concatenate the judge responses into a formatted string, then
-    pass it into a default prompt template. This template can be customized by providing a custom prompt.
-
-    Args:
-        judge_responses (List[str]): List of responses from individual judges to be synthesized
-        custom_system_prompt (str, optional): Custom system prompt to override the default one. Defaults to None.
-        custom_conversation_history (List[dict], optional): Custom conversation history to override the default one. Defaults to None.
-    """
-    formatted_responses = "\n".join(
-        [
-            f"# Judge {i + 1}'s response: #\n{response}"
-            for i, response in enumerate(judge_responses)
-        ]
-    )
-
-    # This is the default prompt for the Mixture of Judges model
-    """
-    You are tasked with synthesizing responses from multiple expert judges. You will receive N individual answers on the same topic. Your job is to:
-
-    1. Analyze and compare the key points, patterns, and agreements between the answers.
-    2. Identify the consensus by focusing on areas where most or all of the answers align. Consider common reasoning and frequently mentioned conclusions.
-    3. Condense the responses into a single, coherent, and concise answer that represents the collective judgment of the group.
-    4. When opinions differ or contradict, highlight the most supported viewpoint while briefly acknowledging the dissenting perspectives.
-    5. Ensure the final answer is balanced and clear, providing a comprehensive summary that captures the wisdom of all judges while avoiding repetition.
-
-    ## Start of Judge Responses ##
-    {{judge_responses}}
-    ## End of Judge Responses ##
-    Synthesized response:
-    """
-
-    default_conversation = [  # inject the judge responses into the default prompt
-        {
-            "role": "system",
-            "content": "You are tasked with synthesizing responses from multiple expert judges. You will receive N individual answers on the same topic. Your job is to:\n1. Analyze and compare the key points, patterns, and agreements between the answers.\n2. Identify the consensus by focusing on areas where most or all of the answers align. Consider common reasoning and frequently mentioned conclusions.\n3. Condense the responses into a single, coherent, and concise answer that represents the collective judgment of the group.\n4. When opinions differ or contradict, highlight the most supported viewpoint while briefly acknowledging the dissenting perspectives.\n5. Ensure the final answer is balanced and clear, providing a comprehensive summary that captures the wisdom of all judges while avoiding repetition.\n\n**IMPORTANT**: IF THE JUDGE RESPONSES ARE IN JSON FORMAT, YOU MUST RESPOND USING THE SAME JSON FORMAT THAT THE RESPONSES ARE IN. If the judge responses are in JSON, you MUST RESPOND IN VALID JSON FORMAT. ",
-        },
-        {
-            "role": "user",
-            "content": '## Start of Judge Responses ## \n# Judge 1\'s response: #\n{\n"claims": [\n{\n"claim": "A 30-day full refund is offered.",\n"quote": "We offer a 30-day full refund at no extra cost."\n},\n{\n"claim": "The 30-day full refund comes at no extra cost.",\n"quote": "We offer a 30-day full refund at no extra cost."\n}\n]\n}\n\n# Judge 2\'s response: #\n{\n "claims": [\n {\n "claim": "A full refund is offered within 30 days.",\n "quote": "We offer a 30-day full refund at no extra cost."\n },\n {\n "claim": "The 30-day full refund is offered at no extra cost.",\n "quote": "We offer a 30-day full refund at no extra cost."\n }\n ]\n}\n# Judge 3\'s response: #\n {\n "claims": [\n {\n "claim": "A 30-day full refund is offered.",\n "quote": "We offer a 30-day full refund at no extra cost."\n },\n {\n "claim": "The 30-day full refund is offered at no extra cost.",\n "quote": "We offer a 30-day full refund at no extra cost."\n }\n ]\n}\n## End of Judge Responses ##\nSynthesized response:',
-        },
-        {
-            "role": "assistant",
-            "content": 'The consensus among the judges is clear and unanimous. All three judges agree that a 30-day full refund is offered, and this refund is available at no extra cost. This conclusion is consistently supported by their statements, as each of their claims is directly quoted as: "We offer a 30-day full refund at no extra cost." There are no dissenting perspectives or opposing views provided in any of the responses, indicating complete alignment on this topic.\n\nJSON:\n{\n "claims": [\n {\n "claim": "A full refund is offered within 30 days.",\n "quote": "We offer a 30-day full refund at no extra cost."\n },\n {\n "claim": "The 30-day full refund is offered at no extra cost.",\n "quote": "We offer a 30-day full refund at no extra cost."\n }\n ]\n}',
-        },
-        {
-            "role": "user",
-            "content": "## Start of Judge Responses ##\n# Judge 1's response: # \nThe capital of France is Paris.\n\n# Judge 2's response: #\nThe capital of France is Paris.\n\n# Judge 3's response: # \nThe capital of France is Paris. It's one of the most popular tourist destinations in the world, known for its art, culture, and history. It's also famous for its iconic landmarks such as the Eiffel Tower, Louvre Museum, and Notre-Dame Cathedral.\n\n## End of Judge Responses ##\nSynthesized response:",
-        },
-        {
-            "role": "assistant",
-            "content": "The capital of France is Paris. It is widely recognized as one of the world's most popular tourist destinations, celebrated for its rich art, culture, and history. Paris is renowned for its iconic landmarks, including the Eiffel Tower, Louvre Museum, and Notre-Dame Cathedral.",
-        },
-        {
-            "role": "user",
-            "content": f"## Start of Judge Responses ##\n{formatted_responses}\n## End of Judge Responses ##\nSynthesized response:\n",
-        },
-    ]
-
-    # If a custom system prompt is provided, validate and use it
-    if custom_system_prompt is not None:
-        if not isinstance(custom_system_prompt, str):
-            judgeval_logger.error(
-                f"TypeError: Custom system prompt must be a string. Received: {type(custom_system_prompt)}."
-            )
-            raise TypeError(
-                f"Custom system prompt must be a string. Received: {type(custom_system_prompt)}."
-            )
-        if not custom_system_prompt:
-            raise ValueError("Custom system prompt cannot be empty")
-        # Override the default system prompt, but also add special instructions for handling JSON
-        default_conversation[0]["content"] = (
-            custom_system_prompt
-            + "\n\n**IMPORTANT**: IF THE JUDGE RESPONSES ARE IN JSON FORMAT, YOU MUST RESPOND USING THE SAME JSON FORMAT THAT THE RESPONSES ARE IN. If the judge responses are in JSON, you MUST RESPOND IN VALID JSON FORMAT."
        )
-
-    # If a custom conversation history is provided, append the judge responses to it
-    if custom_conversation_history is not None:
-        # Validate custom conversation history format
-        for message in custom_conversation_history:
-            if not isinstance(message, dict):
-                raise TypeError(
-                    f"Custom conversation history must be a list of dictionaries. Received: {message}."
-                )
-
-            if "role" not in message or "content" not in message:
-                raise ValueError("Each message must have 'role' and 'content' keys")
-
-            if not isinstance(message["role"], str) or not isinstance(
-                message["content"], str
-            ):
-                raise TypeError(
-                    f"Message role and content must be strings. Received: {type(message['role'])}, {type(message['content'])}."
-                )
-
-            if message["role"] not in ["system", "user", "assistant"]:
-                raise ValueError(
-                    f"Message role must be one of: 'system', 'user', 'assistant'. Received: {message['role']}."
-                )
-
-        judge_responses_prompt = {
-            "role": "user",
-            "content": f"## Start of Judge Responses ##\n{formatted_responses}\n## End of Judge Responses ##\nSynthesized response:\n",
-        }
-        return custom_conversation_history + [judge_responses_prompt]
-
-    # Otherwise return the default conversation with system prompt and examples
-    # No customization, return the default conversation with system prompt and examples
-    return default_conversation
-
-
-BASE_CONVERSATION = [
-    {"role": "system", "content": "You are a helpful assistant."},
-]  # for string inputs, we need to add the user query to a base conversation, since LiteLLM only accepts a list of dictionaries as a chat history
-
-
-class MixtureOfJudges(JudgevalJudge):
-    """
-    IMPORTANT: When supplying custom prompts and conversation histories for aggregation, supply them in the following format:
-    in kwargs:
-    {
-        "custom_prompt": "Your custom prompt here",
-        "custom_conversation": [
-            {"role": "system", "content": "System message 1"},
-            {"role": "user", "content": "User message 1"},
-            {"role": "assistant", "content": "Assistant message 1"},
-            ...
-        ]
-    }
-    """
-
-    def __init__(
-        self,
-        models: List[str] = [
-            "QWEN",
-            "LLAMA3_70B_INSTRUCT_TURBO",
-            "MISTRAL_8x22B_INSTRUCT",
-        ],
-        aggregator: str = DEFAULT_GPT_MODEL,
-        **kwargs,
-    ):
-        """
-        `models` are the individual judge models to be used for generating responses.
-        `aggregator` is the model that will aggregate the responses from the individual judges.
-
-        kwargs include "custom_prompt" and "custom_conversation" for customizing the prompt for the Mixture of Judges model.
-        """
-        self.models = models
-        self.aggregator = aggregator
-        self.kwargs = kwargs
-        super().__init__(model_name=models)
-
-    def generate(
-        self,
-        input: Union[str, List[dict]],
-        response_schema: Union[pydantic.BaseModel, None] = None,
-        aggregation_schema: Union[pydantic.BaseModel, None] = None,
-        **kwargs,
-    ) -> str:
-        """
-        Args:
-            input (Union[str, List[Mapping[str, str]]]): Input query or conversation history to the model.
-            response_schema (pydantic.BaseModel): Response schema for individual judge models.
-            aggregation_schema (pydantic.BaseModel): Response schema for the aggregator model.
-            kwargs: Additional keyword arguments.
-        """
-
-        # Convert input to conversation format if needed
-        if isinstance(input, str):
-            convo = BASE_CONVERSATION + [{"role": "user", "content": input}]
-        elif isinstance(input, list):
-            convo = input
-        else:
-            judgeval_logger.error(f"Invalid input type received: {type(input)}")
-            raise TypeError(
-                f"Input must be a string or a list of dictionaries. Input type of: {type(input)}"
-            )
-
-        try:
-            responses = get_completion_multiple_models(
-                models=self.models,
-                messages=[convo] * len(self.models),
-                response_formats=[response_schema] * len(self.models),
-            )
-        except Exception:
-            raise
-
-        compiled_mixture_prompt = build_dynamic_mixture_prompt(
-            responses,
-            self.kwargs.get("custom_prompt"),
-            self.kwargs.get("custom_conversation"),
-        )
-
-        try:
-            mixed_response = get_chat_completion(
-                model_type=self.aggregator,
-                messages=compiled_mixture_prompt,
-                response_format=aggregation_schema,
-            )
-        except Exception:
-            raise
-
-        return mixed_response
-
-    async def a_generate(
-        self,
-        input: Union[str, List[dict]],
-        response_schema: Union[pydantic.BaseModel, None] = None,
-        aggregation_schema: Union[pydantic.BaseModel, None] = None,
-        **kwargs,
-    ) -> str:
-        """
-        Args:
-            input (Union[str, List[Mapping[str, str]]]): Input query or conversation history to the model.
-            response_schema (pydantic.BaseModel): Response schema for individual judge models.
-            aggregation_schema (pydantic.BaseModel): Response schema for the aggregator model.
-            kwargs: Additional keyword arguments.
-        """
-
-        # Convert input to conversation format if needed
-        if isinstance(input, str):
-            convo = BASE_CONVERSATION + [{"role": "user", "content": input}]
-        elif isinstance(input, list):
-            convo = input
-        else:
-            judgeval_logger.error(f"Invalid input type received: {type(input)}")
-            raise TypeError(
-                f"Input must be a string or a list of dictionaries. Input type of: {type(input)}"
-            )
-
-        try:
-            responses = await aget_completion_multiple_models(
-                models=self.models,
-                messages=[convo] * len(self.models),
-                response_formats=[response_schema] * len(self.models),
-            )
-        except Exception:
-            raise
-
-        compiled_mixture_prompt = build_dynamic_mixture_prompt(
-            responses,
-            self.kwargs.get("custom_prompt"),
-            self.kwargs.get("custom_conversation"),
-        )
-
-        try:
-            mixed_response = await aget_chat_completion(
-                model_type=self.aggregator,
-                messages=compiled_mixture_prompt,
-                response_format=aggregation_schema,
-            )
-        except Exception:
-            raise
-
-        return mixed_response
-
-    def load_model(self):
-        return self.models
-
-    def get_model_name(self) -> List[str]:
-        return self.models
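For context on what is being deleted here, below is a minimal sketch of how the removed MixtureOfJudges judge was driven in 0.7.1, based only on the signatures and defaults visible in the deleted code above; the module path is inferred from the file path, and the example question is illustrative.

    # Hypothetical 0.7.1 usage of the now-removed MixtureOfJudges (see the deleted code above).
    from judgeval.judges.mixture_of_judges import MixtureOfJudges

    # The defaults above fan the prompt out to three judge models and aggregate with
    # DEFAULT_GPT_MODEL; custom_prompt / custom_conversation are the documented kwargs
    # for customizing the aggregation step.
    judge = MixtureOfJudges(
        models=["QWEN", "LLAMA3_70B_INSTRUCT_TURBO", "MISTRAL_8x22B_INSTRUCT"],
        custom_prompt="Synthesize the judges' answers into a single verdict.",
    )

    # generate() accepts either a plain query string or a chat history (list of role/content dicts).
    verdict = judge.generate("Does the policy offer a 30-day full refund at no extra cost?")
    print(verdict)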
judgeval/judgment_client.py
@@ -1,267 +0,0 @@
-"""
-Implements the JudgmentClient to interact with the Judgment API.
-"""
-
-from __future__ import annotations
-import os
-import importlib.util
-from pathlib import Path
-from uuid import uuid4
-from typing import Optional, List, Dict, Union
-
-from judgeval.data import (
-    ScoringResult,
-    Example,
-)
-from judgeval.scorers import (
-    APIScorerConfig,
-    BaseScorer,
-)
-from judgeval.data.evaluation_run import EvaluationRun
-from judgeval.run_evaluation import (
-    run_eval,
-    assert_test,
-)
-from judgeval.common.api import JudgmentApiClient
-from judgeval.common.exceptions import JudgmentAPIError
-from judgeval.common.utils import validate_api_key
-from pydantic import BaseModel
-from judgeval.common.logger import judgeval_logger
-
-
-from judgeval.constants import DEFAULT_GPT_MODEL
-
-
-class EvalRunRequestBody(BaseModel):
-    eval_name: str
-    project_name: str
-
-
-class DeleteEvalRunRequestBody(BaseModel):
-    eval_names: List[str]
-    project_name: str
-
-
-class SingletonMeta(type):
-    _instances: Dict[type, "JudgmentClient"] = {}
-
-    def __call__(cls, *args, **kwargs):
-        if cls not in cls._instances:
-            instance = super().__call__(*args, **kwargs)
-            cls._instances[cls] = instance
-        return cls._instances[cls]
-
-
-class JudgmentClient(metaclass=SingletonMeta):
-    def __init__(
-        self,
-        api_key: Optional[str] = os.getenv("JUDGMENT_API_KEY"),
-        organization_id: Optional[str] = os.getenv("JUDGMENT_ORG_ID"),
-    ):
-        if not api_key:
-            raise ValueError(
-                "api_key parameter must be provided. Please provide a valid API key value or set the JUDGMENT_API_KEY environment variable."
-            )
-
-        if not organization_id:
-            raise ValueError(
-                "organization_id parameter must be provided. Please provide a valid organization ID value or set the JUDGMENT_ORG_ID environment variable."
-            )
-
-        self.judgment_api_key = api_key
-        self.organization_id = organization_id
-        self.api_client = JudgmentApiClient(api_key, organization_id)
-
-        # Verify API key is valid
-        result, response = validate_api_key(api_key)
-        if not result:
-            # May be bad to output their invalid API key...
-            raise JudgmentAPIError(f"Issue with passed in Judgment API key: {response}")
-        else:
-            judgeval_logger.info("Successfully initialized JudgmentClient!")
-
-    def run_evaluation(
-        self,
-        examples: List[Example],
-        scorers: List[Union[APIScorerConfig, BaseScorer]],
-        model: Optional[str] = DEFAULT_GPT_MODEL,
-        project_name: str = "default_project",
-        eval_run_name: str = "default_eval_run",
-        show_url: bool = True,
-    ) -> List[ScoringResult]:
-        """
-        Executes an evaluation of `Example`s using one or more `Scorer`s
-
-        Args:
-            examples (List[Example]): The examples to evaluate
-            scorers (List[Union[APIScorerConfig, BaseScorer]]): A list of scorers to use for evaluation
-            model (str): The model used as a judge when using LLM as a Judge
-            project_name (str): The name of the project the evaluation results belong to
-            eval_run_name (str): A name for this evaluation run
-
-        Returns:
-            List[ScoringResult]: The results of the evaluation
-        """
-
-        try:
-            eval = EvaluationRun(
-                project_name=project_name,
-                eval_name=eval_run_name,
-                examples=examples,
-                scorers=scorers,
-                model=model,
-                organization_id=self.organization_id,
-            )
-            return run_eval(
-                eval,
-                self.judgment_api_key,
-                show_url=show_url,
-            )
-        except ValueError as e:
-            raise ValueError(
-                f"Please check your EvaluationRun object, one or more fields are invalid: \n{str(e)}"
-            )
-        except Exception as e:
-            raise Exception(f"An unexpected error occurred during evaluation: {str(e)}")
-
-    def create_project(self, project_name: str) -> bool:
-        """
-        Creates a project on the server.
-        """
-        try:
-            self.api_client.create_project(project_name)
-            return True
-        except Exception as e:
-            judgeval_logger.error(f"Error creating project: {e}")
-            return False
-
-    def delete_project(self, project_name: str) -> bool:
-        """
-        Deletes a project from the server. Which also deletes all evaluations and traces associated with the project.
-        """
-        self.api_client.delete_project(project_name)
-        return True
-
-    def assert_test(
-        self,
-        examples: List[Example],
-        scorers: List[Union[APIScorerConfig, BaseScorer]],
-        model: Optional[str] = DEFAULT_GPT_MODEL,
-        project_name: str = "default_test",
-        eval_run_name: str = str(uuid4()),
-    ) -> None:
-        """
-        Asserts a test by running the evaluation and checking the results for success
-
-        Args:
-            examples (List[Example]): The examples to evaluate.
-            scorers (List[Union[APIScorerConfig, BaseScorer]]): A list of scorers to use for evaluation
-            model (str): The model used as a judge when using LLM as a Judge
-            project_name (str): The name of the project the evaluation results belong to
-            eval_run_name (str): A name for this evaluation run
-        """
-
-        results: List[ScoringResult]
-
-        results = self.run_evaluation(
-            examples=examples,
-            scorers=scorers,
-            model=model,
-            project_name=project_name,
-            eval_run_name=eval_run_name,
-        )
-        assert_test(results)
-
-    def _extract_scorer_name(self, scorer_file_path: str) -> str:
-        """Extract scorer name from the scorer file by importing it."""
-        try:
-            spec = importlib.util.spec_from_file_location(
-                "scorer_module", scorer_file_path
-            )
-            if spec is None or spec.loader is None:
-                raise ImportError(f"Could not load spec from {scorer_file_path}")
-
-            module = importlib.util.module_from_spec(spec)
-            spec.loader.exec_module(module)
-
-            for attr_name in dir(module):
-                attr = getattr(module, attr_name)
-                if (
-                    isinstance(attr, type)
-                    and any("Scorer" in str(base) for base in attr.__mro__)
-                    and attr.__module__ == "scorer_module"
-                ):
-                    try:
-                        # Instantiate the scorer and get its name
-                        scorer_instance = attr()
-                        if hasattr(scorer_instance, "name"):
-                            return scorer_instance.name
-                    except Exception:
-                        # Skip if instantiation fails
-                        continue
-
-            raise AttributeError("No scorer class found or could be instantiated")
-        except Exception as e:
-            judgeval_logger.warning(f"Could not extract scorer name: {e}")
-            return Path(scorer_file_path).stem
-
-    def upload_custom_scorer(
-        self,
-        scorer_file_path: str,
-        requirements_file_path: Optional[str] = None,
-        unique_name: Optional[str] = None,
-    ) -> bool:
-        """
-        Upload custom ExampleScorer from files to backend.
-
-        Args:
-            scorer_file_path: Path to Python file containing CustomScorer class
-            requirements_file_path: Optional path to requirements.txt
-            unique_name: Optional unique identifier (auto-detected from scorer.name if not provided)
-
-        Returns:
-            bool: True if upload successful
-
-        Raises:
-            ValueError: If scorer file is invalid
-            FileNotFoundError: If scorer file doesn't exist
-        """
-        import os
-
-        if not os.path.exists(scorer_file_path):
-            raise FileNotFoundError(f"Scorer file not found: {scorer_file_path}")
-
-        # Auto-detect scorer name if not provided
-        if unique_name is None:
-            unique_name = self._extract_scorer_name(scorer_file_path)
-            judgeval_logger.info(f"Auto-detected scorer name: '{unique_name}'")
-
-        # Read scorer code
-        with open(scorer_file_path, "r") as f:
-            scorer_code = f.read()
-
-        # Read requirements (optional)
-        requirements_text = ""
-        if requirements_file_path and os.path.exists(requirements_file_path):
-            with open(requirements_file_path, "r") as f:
-                requirements_text = f.read()
-
-        try:
-            response = self.api_client.upload_custom_scorer(
-                scorer_name=unique_name,
-                scorer_code=scorer_code,
-                requirements_text=requirements_text,
-            )
-
-            if response.get("status") == "success":
-                judgeval_logger.info(
-                    f"Successfully uploaded custom scorer: {unique_name}"
-                )
-                return True
-            else:
-                judgeval_logger.error(f"Failed to upload custom scorer: {unique_name}")
-                return False
-
-        except Exception as e:
-            judgeval_logger.error(f"Error uploading custom scorer: {e}")
-            raise
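Likewise, a minimal sketch of the removed 0.7.1 JudgmentClient entry point, based only on the deleted code above; the scorer and requirements file paths are hypothetical placeholders.

    # Hypothetical 0.7.1 usage of the removed JudgmentClient (see the deleted code above).
    from judgeval.judgment_client import JudgmentClient

    # The constructor reads JUDGMENT_API_KEY / JUDGMENT_ORG_ID from the environment and
    # validates the key against the Judgment API before returning the singleton instance.
    client = JudgmentClient()

    # upload_custom_scorer() auto-detects the scorer name from the class defined in the
    # file when unique_name is not supplied.
    ok = client.upload_custom_scorer(
        scorer_file_path="my_scorer.py",            # hypothetical local file
        requirements_file_path="requirements.txt",  # optional
    )
    print("uploaded" if ok else "upload failed")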