judgeval 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- judgeval/__init__.py +0 -71
- judgeval/clients.py +14 -3
- judgeval/common/tracer.py +57 -31
- judgeval/constants.py +1 -0
- judgeval/data/__init__.py +2 -1
- judgeval/data/scorer_data.py +2 -2
- judgeval/evaluation_run.py +16 -15
- judgeval/judges/__init__.py +2 -2
- judgeval/judges/base_judge.py +1 -1
- judgeval/judges/litellm_judge.py +2 -2
- judgeval/judges/mixture_of_judges.py +2 -2
- judgeval/judges/together_judge.py +2 -2
- judgeval/judges/utils.py +4 -4
- judgeval/judgment_client.py +67 -15
- judgeval/run_evaluation.py +79 -14
- judgeval/scorers/__init__.py +8 -4
- judgeval/scorers/api_scorer.py +64 -0
- judgeval/scorers/base_scorer.py +3 -2
- judgeval/scorers/exceptions.py +11 -0
- judgeval/scorers/{custom_scorer.py → judgeval_scorer.py} +9 -5
- judgeval/scorers/judgeval_scorers/__init__.py +132 -9
- judgeval/scorers/judgeval_scorers/api_scorers/__init__.py +23 -0
- judgeval/scorers/judgeval_scorers/api_scorers/answer_correctness.py +19 -0
- judgeval/scorers/judgeval_scorers/{answer_relevancy.py → api_scorers/answer_relevancy.py} +2 -2
- judgeval/scorers/judgeval_scorers/{contextual_precision.py → api_scorers/contextual_precision.py} +2 -2
- judgeval/scorers/judgeval_scorers/{contextual_recall.py → api_scorers/contextual_recall.py} +2 -2
- judgeval/scorers/judgeval_scorers/{contextual_relevancy.py → api_scorers/contextual_relevancy.py} +2 -2
- judgeval/scorers/judgeval_scorers/{faithfulness.py → api_scorers/faithfulness.py} +2 -2
- judgeval/scorers/judgeval_scorers/{hallucination.py → api_scorers/hallucination.py} +2 -2
- judgeval/scorers/judgeval_scorers/{json_correctness.py → api_scorers/json_correctness.py} +7 -7
- judgeval/scorers/judgeval_scorers/{summarization.py → api_scorers/summarization.py} +2 -2
- judgeval/scorers/judgeval_scorers/{tool_correctness.py → api_scorers/tool_correctness.py} +2 -2
- judgeval/scorers/judgeval_scorers/local_implementations/__init__.py +24 -0
- judgeval/scorers/judgeval_scorers/local_implementations/answer_correctness/__init__.py +4 -0
- judgeval/scorers/judgeval_scorers/local_implementations/answer_correctness/answer_correctness_scorer.py +272 -0
- judgeval/scorers/judgeval_scorers/local_implementations/answer_correctness/prompts.py +169 -0
- judgeval/scorers/judgeval_scorers/local_implementations/answer_relevancy/__init__.py +4 -0
- judgeval/scorers/judgeval_scorers/local_implementations/answer_relevancy/answer_relevancy_scorer.py +292 -0
- judgeval/scorers/judgeval_scorers/local_implementations/answer_relevancy/prompts.py +174 -0
- judgeval/scorers/judgeval_scorers/local_implementations/contextual_precision/__init__.py +3 -0
- judgeval/scorers/judgeval_scorers/local_implementations/contextual_precision/contextual_precision_scorer.py +259 -0
- judgeval/scorers/judgeval_scorers/local_implementations/contextual_precision/prompts.py +106 -0
- judgeval/scorers/judgeval_scorers/local_implementations/contextual_recall/__init__.py +3 -0
- judgeval/scorers/judgeval_scorers/local_implementations/contextual_recall/contextual_recall_scorer.py +249 -0
- judgeval/scorers/judgeval_scorers/local_implementations/contextual_recall/prompts.py +142 -0
- judgeval/scorers/judgeval_scorers/local_implementations/contextual_relevancy/__init__.py +3 -0
- judgeval/scorers/judgeval_scorers/local_implementations/contextual_relevancy/contextual_relevancy_scorer.py +240 -0
- judgeval/scorers/judgeval_scorers/local_implementations/contextual_relevancy/prompts.py +121 -0
- judgeval/scorers/judgeval_scorers/local_implementations/faithfulness/__init__.py +3 -0
- judgeval/scorers/judgeval_scorers/local_implementations/faithfulness/faithfulness_scorer.py +318 -0
- judgeval/scorers/judgeval_scorers/local_implementations/faithfulness/prompts.py +265 -0
- judgeval/scorers/judgeval_scorers/local_implementations/hallucination/__init__.py +3 -0
- judgeval/scorers/judgeval_scorers/local_implementations/hallucination/hallucination_scorer.py +258 -0
- judgeval/scorers/judgeval_scorers/local_implementations/hallucination/prompts.py +104 -0
- judgeval/scorers/judgeval_scorers/local_implementations/json_correctness/json_correctness_scorer.py +127 -0
- judgeval/scorers/judgeval_scorers/local_implementations/summarization/__init__.py +3 -0
- judgeval/scorers/judgeval_scorers/local_implementations/summarization/prompts.py +247 -0
- judgeval/scorers/judgeval_scorers/local_implementations/summarization/summarization_scorer.py +541 -0
- judgeval/scorers/judgeval_scorers/local_implementations/tool_correctness/__init__.py +3 -0
- judgeval/scorers/judgeval_scorers/local_implementations/tool_correctness/tool_correctness_scorer.py +151 -0
- judgeval/scorers/prompt_scorer.py +4 -4
- judgeval/scorers/score.py +14 -14
- judgeval/scorers/utils.py +40 -6
- {judgeval-0.0.3.dist-info → judgeval-0.0.5.dist-info}/METADATA +1 -1
- judgeval-0.0.5.dist-info/RECORD +78 -0
- judgeval-0.0.3.dist-info/RECORD +0 -46
- {judgeval-0.0.3.dist-info → judgeval-0.0.5.dist-info}/WHEEL +0 -0
- {judgeval-0.0.3.dist-info → judgeval-0.0.5.dist-info}/licenses/LICENSE.md +0 -0
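Based on the reorganized scorer layout above (hosted API scorers under api_scorers/, reference implementations under local_implementations/, and custom_scorer.py renamed to judgeval_scorer.py), a minimal usage sketch for the new local answer relevancy scorer might look like the following. The import path and the Example(input=..., actual_output=...) keyword arguments are assumptions inferred from the file list and the scorer source shown below, not verified against the 0.0.5 API.

# Hedged sketch: exercises the local AnswerRelevancyScorer added in 0.0.5.
# Assumes Example accepts `input` and `actual_output` keyword arguments (inferred from
# ExampleParams.INPUT / ExampleParams.ACTUAL_OUTPUT) and that the scorer is importable
# from the new local_implementations package; "gpt-4o" is a placeholder judge model name.
from judgeval.data import Example
from judgeval.scorers.judgeval_scorers.local_implementations.answer_relevancy import (
    AnswerRelevancyScorer,
)

example = Example(
    input="How do I make chocolate chip cookies?",
    actual_output="Preheat the oven to 375°F. Mix the butter and sugar until creamy.",
)

# threshold / model / async_mode mirror the constructor signature in the scorer source below.
scorer = AnswerRelevancyScorer(threshold=0.5, model="gpt-4o", async_mode=False)
score = scorer.score_example(example, _show_indicator=False)
print(score, scorer.reason)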
judgeval/scorers/judgeval_scorers/local_implementations/answer_relevancy/answer_relevancy_scorer.py
ADDED
@@ -0,0 +1,292 @@
from typing import Optional, List, Union, Tuple

from judgeval.scorers.utils import (get_or_create_event_loop,
                                    scorer_progress_meter,
                                    create_verbose_logs,
                                    parse_response_json,
                                    check_example_params
                                    )
from judgeval.scorers import JudgevalScorer
from judgeval.judges import JudgevalJudge
from judgeval.judges.utils import create_judge
from judgeval.data import Example, ExampleParams
from judgeval.scorers.judgeval_scorers.local_implementations.answer_relevancy.prompts import (
    Statements,
    ARVerdict,
    Verdicts,
    Reason,
    AnswerRelevancyTemplate,
)

required_params = [
    ExampleParams.INPUT,
    ExampleParams.ACTUAL_OUTPUT,
]


class AnswerRelevancyScorer(JudgevalScorer):
    def __init__(
        self,
        threshold: float = 0.5,
        model: Optional[Union[str, JudgevalJudge]] = None,
        include_reason: bool = True,
        async_mode: bool = True,
        strict_mode: bool = False,
        verbose_mode: bool = False,
    ):
        self.threshold = 1 if strict_mode else threshold
        self.model, self.using_native_model = create_judge(model)
        self.evaluation_model = self.model.get_model_name()
        self.include_reason = include_reason
        self.async_mode = async_mode
        self.strict_mode = strict_mode
        self.verbose_mode = verbose_mode

    def score_example(
        self,
        example: Example,
        _show_indicator: bool = True,
    ) -> float:
        check_example_params(example, required_params, self)

        with scorer_progress_meter(self, display_meter=_show_indicator):
            try:
                if self.async_mode:
                    loop = get_or_create_event_loop()
                    loop.run_until_complete(
                        self.a_score_example(example, _show_indicator=False)
                    )
                else:
                    self.statements: List[str] = self._get_statements(
                        example.actual_output
                    )
                    self.verdicts: List[ARVerdict] = (
                        self._get_verdicts(example.input)
                    )
                    self.score = self._compute_score()
                    self.reason = self._get_reason(example.input)
                    self.success = self.score >= self.threshold
                    self.verbose_logs = create_verbose_logs(
                        self,
                        steps=[
                            f"Statements:\n{self.statements}",
                            # Convert to dict for serialization purposes
                            f"Verdicts:\n{[v.model_dump() for v in self.verdicts]}",
                            f"Score: {self.score}\nReason: {self.reason}",
                        ],
                    )
                return self.score
            except Exception as e:
                raise

    async def a_score_example(
        self,
        example: Example,
        _show_indicator: bool = True,
    ) -> float:
        check_example_params(example, required_params, self)
        try:
            with scorer_progress_meter(
                self, async_mode=True, display_meter=_show_indicator
            ):
                self.statements: List[str] = await self._a_get_statements(
                    example.actual_output
                )
                self.verdicts: List[ARVerdict] = (
                    await self._a_get_verdicts(example.input)
                )
                self.score = self._compute_score()
                self.reason = await self._a_get_reason(example.input)
                self.success = self.score >= self.threshold
                self.verbose_logs = create_verbose_logs(
                    self,
                    steps=[
                        f"Statements:\n{self.statements}",
                        # Convert to dict for serialization purposes
                        f"Verdicts:\n{[v.model_dump() for v in self.verdicts]}",
                        f"Score: {self.score}\nReason: {self.reason}",
                    ],
                )
                return self.score
        except Exception as e:
            print(f"Error: {e}")
            raise

    async def _a_get_reason(self, input: str) -> str:
        if self.include_reason is False:
            return None

        irrelevant_statements: List[Tuple[str, str]] = []
        for idx, verdict in enumerate(self.verdicts):
            if verdict.verdict.strip().lower() == "no":
                irrelevant_statements.append((self.statements[idx], verdict.reason))

        prompt = AnswerRelevancyTemplate.generate_reason(
            irrelevant_statements=irrelevant_statements,
            input=input,
            score=format(self.score, ".2f"),
        )
        if self.using_native_model:
            res = await self.model.a_generate(prompt)
            data = parse_response_json(res, self)
            return data["reason"]
        else:
            try:
                res: Reason = await self.model.a_generate(
                    prompt=prompt, schema=Reason
                )
                return res.reason
            except TypeError:
                res = await self.model.a_generate(prompt)
                data = parse_response_json(res, self)
                return data["reason"]

    def _get_reason(self, input: str) -> str:
        if self.include_reason is False:
            return None

        irrelevant_statements = []
        for verdict in self.verdicts:
            if verdict.verdict.strip().lower() == "no":
                irrelevant_statements.append(verdict.reason)

        prompt = AnswerRelevancyTemplate.generate_reason(
            irrelevant_statements=irrelevant_statements,
            input=input,
            score=format(self.score, ".2f"),
        )

        if self.using_native_model:
            res = self.model.generate(prompt)
            data = parse_response_json(res, self)
            return data["reason"]
        else:
            try:
                res: Reason = self.model.generate(prompt, schema=Reason)
                return res.reason
            except TypeError:
                res = self.model.generate(prompt)
                data = parse_response_json(res, self)
                return data["reason"]

    async def _a_get_verdicts(
        self, input: str
    ) -> List[ARVerdict]:
        if len(self.statements) == 0:
            return []

        prompt = AnswerRelevancyTemplate.generate_verdicts(
            input=input,
            actual_output=self.statements,
        )
        if self.using_native_model:
            res = await self.model.a_generate(prompt)
            data = parse_response_json(res, self)
            return [
                ARVerdict(**item) for item in data["verdicts"]
            ]
        else:
            try:
                res: Verdicts = await self.model.a_generate(
                    prompt, schema=Verdicts
                )
                return [item for item in res.verdicts]
            except TypeError:
                res = await self.model.a_generate(prompt)
                data = parse_response_json(res, self)
                return [
                    ARVerdict(**item) for item in data["verdicts"]
                ]

    def _get_verdicts(self, input: str) -> List[ARVerdict]:
        if len(self.statements) == 0:
            return []

        prompt = AnswerRelevancyTemplate.generate_verdicts(
            input=input,
            actual_output=self.statements,
        )
        if self.using_native_model:
            res = self.model.generate(prompt)
            data = parse_response_json(res, self)
            return [ARVerdict(**item) for item in data["verdicts"]]
        else:
            try:
                res: Verdicts = self.model.generate(prompt, schema=Verdicts)
                return [item for item in res.verdicts]
            except TypeError:
                res = self.model.generate(prompt)
                data = parse_response_json(res, self)
                return [
                    ARVerdict(**item) for item in data["verdicts"]
                ]

    async def _a_get_statements(
        self,
        actual_output: str,
    ) -> List[str]:
        prompt = AnswerRelevancyTemplate.deduce_statements(
            actual_output=actual_output,
        )
        if self.using_native_model:
            res = await self.model.a_generate(prompt)
            data = parse_response_json(res, self)
            return data["statements"]
        else:
            try:
                res: Statements = await self.model.a_generate(
                    prompt, schema=Statements
                )
                return res.statements
            except TypeError:
                res = await self.model.a_generate(prompt)
                data = parse_response_json(res, self)
                return data["statements"]

    def _get_statements(
        self,
        actual_output: str,
    ) -> List[str]:
        prompt = AnswerRelevancyTemplate.deduce_statements(
            actual_output=actual_output,
        )
        if self.using_native_model:
            res = self.model.generate(prompt)
            data = parse_response_json(res, self)
            return data["statements"]
        else:
            try:
                res: Statements = self.model.generate(prompt, schema=Statements)
                return res.statements
            except TypeError:
                res = self.model.generate(prompt)
                data = parse_response_json(res, self)
                return data["statements"]

    def _compute_score(self):
        number_of_verdicts = len(self.verdicts)
        if number_of_verdicts == 0:
            return 1

        relevant_count = 0
        for verdict in self.verdicts:
            if verdict.verdict.strip().lower() != "no":
                relevant_count += 1

        score = relevant_count / number_of_verdicts
        return 0 if self.strict_mode and score < self.threshold else score

    def _success_check(self) -> bool:
        if self.error is not None:
            self.success = False
        else:
            try:
                self.success = self.score >= self.threshold
            except:
                self.success = False
        return self.success

    @property
    def __name__(self):
        return "Answer Relevancy"
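As a quick illustration of the scoring rule in _compute_score above (the values below are hypothetical, chosen only to show the arithmetic): the relevancy score is the fraction of statements not judged "no", and strict_mode collapses any sub-threshold score to 0.

# Hypothetical run: 4 statements, 3 judged relevant ("yes"/"idk"), 1 judged "no".
verdicts = ["yes", "idk", "yes", "no"]
relevant_count = sum(1 for v in verdicts if v != "no")  # 3
score = relevant_count / len(verdicts)                  # 0.75
# threshold=0.5        -> success, since 0.75 >= 0.5
# strict_mode=True     -> threshold is forced to 1, so 0.75 < 1 and the reported score becomes 0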
judgeval/scorers/judgeval_scorers/local_implementations/answer_relevancy/prompts.py
ADDED
@@ -0,0 +1,174 @@
"""
Util prompts for AnswerRelevancyScorer
"""

from typing import List, Optional, Tuple
from pydantic import BaseModel, Field


# BaseModels to enforce formatting in LLM JSON response
class Statements(BaseModel):
    statements: List[str]


class ARVerdict(BaseModel):
    verdict: str
    reason: str


class Verdicts(BaseModel):
    verdicts: List[ARVerdict]


class Reason(BaseModel):
    reason: str


class AnswerRelevancyTemplate:
    @staticmethod
    def deduce_statements(actual_output):
        return f"""You will be presented with a piece of text. Your task is to break down the text and generate a list of statements contained within the text. Single words and ambiguous phrases should be considered statements.

===== START OF EXAMPLES =====
Example 1:
Example text: The weather is sunny today. Temperature is 75 degrees. Don't forget your sunscreen!

Output:
{{
    "statements": ["The weather is sunny today", "Temperature is 75 degrees", "Don't forget your sunscreen!"]
}}

Example 2:
Example text: I love pizza. It has cheese and tomato sauce and the crust is crispy.

Output:
{{
    "statements": ["I love pizza", "It has cheese and tomato sauce", "The crust is crispy"]
}}
===== END OF EXAMPLES =====


**
IMPORTANT: Please return your answer in valid JSON format, with the "statements" key mapping to a list of strings. No words or explanation is needed.
**

==== START OF INPUT ====
Text:
{actual_output}
==== END OF INPUT ====

==== YOUR ANSWER ====
JSON:
"""

    @staticmethod
    def generate_verdicts(input, actual_output):
        return f"""You will be provided with a list of statements from a response; your task is to determine whether each statement is relevant with respect to a provided input.
More specifically, you should generate a JSON object with the key "verdicts". "verdicts" will map to a list of nested JSON objects with two keys: `verdict` and `reason`.
The "verdict" key be ONE OF THE FOLLOWING: ["yes", "no", "idk"]. You should select "yes" if the statement is relevant to addressing the original input, "no" if the statement is irrelevant, and 'idk' if it is ambiguous (eg., not directly relevant but could be used as a supporting point to address the input).
The "reason" key should provide an explanation for your choice, regardless of which verdict you select.

NOTE: the list of statements was generated from an output corresponding to the provided `input`. Account for this relationship during your evaluation of the content relevancy.

==== OUTPUT FORMATTING ====
IMPORTANT: Please make sure to only return in JSON format, with the "verdicts" key mapping to a list of JSON objects. Each JSON object should contain keys "verdict" (one of ["yes", "no", "idk"]) and "reason" (str).

==== START OF EXAMPLES ====
Example input 1: How do I make chocolate chip cookies?
Example statements 1: ["Preheat the oven to 375°F.", "I love baking!", "My grandmother had a cat.", "Mix the butter and sugar until creamy.", "Have a great day!"]
Example JSON 1:
{{
    "verdicts": [
        {{
            "verdict": "yes",
            "reason": "Preheating the oven is a crucial first step in baking cookies"
        }},
        {{
            "verdict": "idk",
            "reason": "While showing enthusiasm for baking, this statement doesn't directly contribute to the recipe instructions"
        }},
        {{
            "verdict": "no",
            "reason": "The statement about the grandmother's cat is completely irrelevant to instructions for making chocolate chip cookies"
        }},
        {{
            "verdict": "yes",
            "reason": "Mixing butter and sugar is an essential step in cookie preparation"
        }},
        {{
            "verdict": "no",
            "reason": "A farewell message is not relevant to the cookie recipe instructions being requested"
        }}
    ]
}}

Example input 2: What are the main causes of climate change?
Example statements 2: ["Greenhouse gas emissions trap heat in the atmosphere.", "I watched a movie yesterday.", "Industrial processes release large amounts of CO2.", "The weather is nice today."]
Example JSON 2:
{{
    "verdicts": [
        {{
            "verdict": "yes",
            "reason": "This directly explains a key mechanism of climate change"
        }},
        {{
            "verdict": "no",
            "reason": "Personal entertainment activities are not related to the causes of climate change"
        }},
        {{
            "verdict": "yes",
            "reason": "This identifies a major source of greenhouse gas emissions contributing to climate change"
        }},
        {{
            "verdict": "idk",
            "reason": "While weather is related to climate, a single day's weather observation doesn't directly address the causes of climate change"
        }}
    ]
}}
==== END OF EXAMPLES ====

** LASTLY **
Since you are tasked to choose a verdict for each statement, the number of "verdicts" SHOULD BE EXACTLY EQUAL to the number of "statements".


==== YOUR TURN =====

Input:
{input}

Statements:
{actual_output}

JSON:
"""

    @staticmethod
    def generate_reason(irrelevant_statements: List[Tuple[str, str]], input: str, score: float):
        irrelevant_statements = "\n".join([f"statement: {statement}\nreason: {reason}\n------" for statement, reason in irrelevant_statements])
        return f"""==== TASK INSTRUCTIONS ====\nYou will provided with three inputs: an answer relevancy score, a list of irrelevant statements made in a model's output (with the reason why it's irrelevant), and the corresponding input to the output. Your task is to provide a CLEAR and CONCISE reason for the answer relevancy score.
You should explain why the score is not higher, but also include why its current score is fair.
The irrelevant statements represent parts of the model output that are irrelevant to addressing whatever is asked/talked about in the input. The irrelevant statement will be paired with the reason why it's irrelevant.
If there are no irrelevant statements, instead respond with a positive remark with an upbeat encouraging tone (but don't overblow the kind attitude).


==== FORMATTING YOUR ANSWER ====
IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
Example JSON:
{{
    "reason": "The score is <answer_relevancy_score> because <your_reason>."
}}

==== YOUR TURN ====
---- ANSWER RELEVANCY SCORE ----
{score}

---- IRRELEVANT STATEMENTS ----
{irrelevant_statements}

---- INPUT ----
{input}

---- YOUR RESPONSE ----
JSON:
"""
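Because the templates above are plain staticmethods and the response schemas are ordinary Pydantic (v2) models, they can be exercised in isolation. A small sketch follows; the JSON payload is a fabricated stand-in for a judge response, included only to illustrate schema validation.

# Hedged sketch: render a verdict prompt and validate a mock judge response against the
# Verdicts schema defined in this file. The mock JSON is illustrative, not a real model output.
from judgeval.scorers.judgeval_scorers.local_implementations.answer_relevancy.prompts import (
    AnswerRelevancyTemplate,
    Verdicts,
)

prompt = AnswerRelevancyTemplate.generate_verdicts(
    input="What are the main causes of climate change?",
    actual_output=["Greenhouse gas emissions trap heat.", "I watched a movie yesterday."],
)

mock_response = (
    '{"verdicts": ['
    '{"verdict": "yes", "reason": "Explains a key mechanism of climate change."},'
    '{"verdict": "no", "reason": "Unrelated to the question."}'
    ']}'
)
verdicts = Verdicts.model_validate_json(mock_response)  # Pydantic v2 parsing/validation
assert [v.verdict for v in verdicts.verdicts] == ["yes", "no"]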