google-adk 1.5.0__py3-none-any.whl → 1.6.1__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
- google/adk/a2a/converters/event_converter.py +257 -36
- google/adk/a2a/converters/part_converter.py +93 -25
- google/adk/a2a/converters/request_converter.py +12 -32
- google/adk/a2a/converters/utils.py +22 -4
- google/adk/a2a/executor/__init__.py +13 -0
- google/adk/a2a/executor/a2a_agent_executor.py +260 -0
- google/adk/a2a/executor/task_result_aggregator.py +71 -0
- google/adk/a2a/logs/__init__.py +13 -0
- google/adk/a2a/logs/log_utils.py +349 -0
- google/adk/agents/base_agent.py +54 -0
- google/adk/agents/llm_agent.py +15 -0
- google/adk/agents/remote_a2a_agent.py +532 -0
- google/adk/artifacts/in_memory_artifact_service.py +6 -3
- google/adk/cli/browser/chunk-EQDQRRRY.js +1 -0
- google/adk/cli/browser/chunk-TXJFAAIW.js +2 -0
- google/adk/cli/browser/index.html +4 -3
- google/adk/cli/browser/main-RXDVX3K6.js +3914 -0
- google/adk/cli/browser/polyfills-FFHMD2TL.js +17 -0
- google/adk/cli/cli_deploy.py +4 -1
- google/adk/cli/cli_eval.py +8 -6
- google/adk/cli/cli_tools_click.py +30 -10
- google/adk/cli/fast_api.py +120 -5
- google/adk/cli/utils/agent_loader.py +12 -0
- google/adk/evaluation/agent_evaluator.py +107 -10
- google/adk/evaluation/base_eval_service.py +157 -0
- google/adk/evaluation/constants.py +20 -0
- google/adk/evaluation/eval_case.py +3 -3
- google/adk/evaluation/eval_metrics.py +39 -0
- google/adk/evaluation/evaluation_generator.py +1 -1
- google/adk/evaluation/final_response_match_v2.py +230 -0
- google/adk/evaluation/llm_as_judge.py +141 -0
- google/adk/evaluation/llm_as_judge_utils.py +48 -0
- google/adk/evaluation/metric_evaluator_registry.py +89 -0
- google/adk/evaluation/response_evaluator.py +38 -211
- google/adk/evaluation/safety_evaluator.py +54 -0
- google/adk/evaluation/trajectory_evaluator.py +16 -2
- google/adk/evaluation/vertex_ai_eval_facade.py +147 -0
- google/adk/events/event.py +2 -4
- google/adk/flows/llm_flows/base_llm_flow.py +2 -0
- google/adk/memory/in_memory_memory_service.py +3 -2
- google/adk/models/lite_llm.py +50 -10
- google/adk/runners.py +27 -10
- google/adk/sessions/database_session_service.py +25 -7
- google/adk/sessions/in_memory_session_service.py +5 -1
- google/adk/sessions/vertex_ai_session_service.py +67 -42
- google/adk/tools/bigquery/config.py +11 -1
- google/adk/tools/bigquery/query_tool.py +306 -12
- google/adk/tools/enterprise_search_tool.py +2 -2
- google/adk/tools/function_tool.py +7 -1
- google/adk/tools/google_search_tool.py +1 -1
- google/adk/tools/mcp_tool/mcp_session_manager.py +44 -30
- google/adk/tools/mcp_tool/mcp_tool.py +44 -7
- google/adk/version.py +1 -1
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/METADATA +6 -4
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/RECORD +58 -42
- google/adk/cli/browser/main-JAAWEV7F.js +0 -92
- google/adk/cli/browser/polyfills-B6TNHZQ6.js +0 -17
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/WHEEL +0 -0
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/entry_points.txt +0 -0
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/licenses/LICENSE +0 -0
google/adk/evaluation/final_response_match_v2.py
@@ -0,0 +1,230 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import logging
+import re
+from typing import Optional
+
+from typing_extensions import override
+
+from ..models.llm_response import LlmResponse
+from ..utils.feature_decorator import working_in_progress
+from .eval_case import Invocation
+from .eval_metrics import EvalMetric
+from .evaluator import EvalStatus
+from .evaluator import EvaluationResult
+from .evaluator import PerInvocationResult
+from .llm_as_judge import LlmAsJudge
+from .llm_as_judge_utils import get_eval_status
+from .llm_as_judge_utils import get_text_from_content
+from .llm_as_judge_utils import Label
+
+logger = logging.getLogger("google_adk." + __name__)
+
+_FINAL_RESPONSE_MATCH_V2_PROMPT = """You are an expert rater for an AI agent. The AI agent is going to call an API to answer the user query and generate API tool use code based for the choice of the API and API arguments. The ideal model response should be a function call that fulfills user query, or a natural language response hedges or asks users for further clarification if a function call does not apply.
+The primary focus of this rating task is to check correctness of the model responses.
+
+The data consists of:
+- A user query.
+- A model generated response for the prompt. The responses can consist of:
+  - Natural language, when the model is asking for clarification, or tells the user it does not possess the requested functionality / option.
+  - Code, in the form of one or multiple python function calls, and additional code as needed, for when the model is fulfilling the user request.
+You can use the help from a reference response annotated by a human rater. This reference response is of high quality. You can compare the agent's response with the reference response and decide if the agent's response is valid.
+Note sometimes the reference response only contains the key entities of the correct answer and you need to be flexible to allow the agent response to contain more information than the reference response, or to present the key entities in a different format or structure or in shorter or longer format.
+When the agent response is provided in the form of tables/dataframes or should be best provided in the form of tables/dataframes: focus on the key entities and main components requested in the user query and check whether you can retrieve those from the agent response. Likewise, if you have the reference response, then find out the key entities and main components in them and check whether you can retrieve those from the agent response. If the prompt does not specify any format instructions and the main items/components are included in the response then tolerate the differences in the formatting of those tables/dataframes.
+
+You should follow the constitutions below very carefully to rate the model response:
+- Allow flexibility of format even when reference code only uses one of the possible format, unless API spec or user prompt has explicit format requirement
+  - e.g. For state name, allow both abbreviation and full name unless API spec has explicit requirement. e.g. both 'tx' and 'Texas' should be allowed in the agent response even when reference code only uses one of them.
+  - e.g. If a reference response list outputs in a list format, the agent response is allowed to use sentence format and vice versa unless user prompt explicitly asks for a specific format.
+  - e.g. For numbers, allow flexibility of formatting, e.g. 1000000 vs 1,000,000.
+- The model shouldn't assume that it doesn't have access to according data or incapable of answering the question if reference response is able to find a legit answer.
+- If the model response contains the correct final answer, rate it as valid even when the model response contains more information than the reference response.
+- If the user prompt has csv or other table format data, don't read it yourself. Trust the reference response final answer instead.
+- When the validation needs maths, date calculations, do not use your own calculator. Trust the reference response final answer instead.
+- Be mindful about unit of numbers. For example, if the reference response says 100 miles, but the model response says 100 km, it is invalid.
+- When the agent response or the reference response is provided in the form of tables/dataframes: focus on the key entities and main components requested in the user query and check whether you can retrieve those from the agent response and whether those match the reference response. If the user query does not specify any format instructions and the main items/components are included in the response then tolerate the differences in the formatting of those tables/dataframes.
+- When the answer is in numeric format, check whether there are any format requirements in the numeric format, rounding, precision, number of decimals, etc. specified in the user query and the prompt. If there are no such instructions, then tolerate different numerical formats.
+- When the answer is in numeric format and there are rounding or precision differences between the agent response and the reference response, if no further instructions are provided evaluate if the rounding strategy or precision in the agent response follows the standards for that entity. For instance, model accuracy scores must be reported with at least two decimal places (e.g., 0.798 → 0.80 is acceptable, but 0.7 is not).
+
+Below are the inputs:
+{{
+  "User prompt": {prompt},
+  "Agent response": {response},
+  "Reference response": {golden_response},
+}}
+
+The answer should be a json alone which follows the json structure below:
+{{
+  "reasoning": [reasoning],
+  "is_the_agent_response_valid": [valid or invalid],
+}}
+Answer with assertiveness:
+"""
+
+_DEFAULT_NUM_SAMPLES = 5
+
+
+def _parse_critique(response: str) -> Label:
+  """Parses the judge model critique and extracts the final label.
+
+  Args:
+    response: model response
+
+  Returns:
+    The extracted label, either VALID, INVALID, or NOT_FOUND.
+  """
+  # Regex matching the label field in the response. The end of the field is
+  # identified by either a comma, new line, or an end-bracket.
+  label_match_is_response_valid = re.search(
+      r'"is_the_agent_response_valid":\s*\[*[\n\s]*"*([^"^\]^\s]*)"*[\n\s]*\]*\s*[,\n\}]',
+      response,
+  )
+  # In case the model names the label field as "is_the_agent_response_*invalid*"
+  # instead of "..._*valid*"
+  label_match_is_response_invalid = re.search(
+      r'"is_the_agent_response_invalid":\s*\[*[\n\s]*"*([^"^\]^\s]*)"*[\n\s]*\]*\s*[,\n\}]',
+      response,
+  )
+  # Remove any trailing whitespace, commas, or end-brackets from the label.
+  if label_match_is_response_valid:
+    label = label_match_is_response_valid.group(1).strip(r"\s,\}")
+    if label in [
+        Label.INVALID.value,
+        Label.ALMOST.value,
+        Label.FALSE.value,
+        *Label.PARTIALLY_VALID.value,
+    ]:
+      label = Label.INVALID
+    elif label in [Label.VALID.value, Label.TRUE.value]:
+      label = Label.VALID
+    else:
+      label = Label.NOT_FOUND
+  elif label_match_is_response_invalid:
+    label = label_match_is_response_invalid.group(1).strip(r"\s,\}")
+    label = (
+        Label.INVALID
+        if label in [Label.TRUE.value, Label.INVALID.value]
+        else Label.VALID
+    )
+  else:
+    label = Label.NOT_FOUND
+  return label
+
+
+@working_in_progress
+class FinalResponseMatchV2Evaluator(LlmAsJudge):
+  """V2 final response match evaluator which uses an LLM to judge responses.
+
+  The evaluator prompts the LLM to output whether the agent final response is
+  valid or invalid, hence outputs a score of 0 or 1. Repeated invocation samples
+  are aggregated by taking majority vote, and then the overall score is the
+  fraction, ranging from 0 to 1, of valid samples. Higher values of overall
+  score indicate better final response performance of the agent.
+  """
+
+  def __init__(
+      self,
+      eval_metric: EvalMetric,
+  ):
+    super().__init__(eval_metric)
+    self._auto_rater_prompt_template = _FINAL_RESPONSE_MATCH_V2_PROMPT
+    assert self._eval_metric.judge_model_options is not None
+    if self._eval_metric.judge_model_options.num_samples is None:
+      self._eval_metric.judge_model_options.num_samples = _DEFAULT_NUM_SAMPLES
+
+  @override
+  def format_auto_rater_prompt(
+      self, actual_invocation: Invocation, expected_invocation: Invocation
+  ) -> str:
+    reference = get_text_from_content(expected_invocation.final_response)
+    response = get_text_from_content(actual_invocation.final_response)
+    user_prompt = get_text_from_content(expected_invocation.user_content)
+    return self._auto_rater_prompt_template.format(
+        prompt=user_prompt,
+        response=response,
+        golden_response=reference,
+    )
+
+  @override
+  def convert_auto_rater_response_to_score(
+      self, llm_response: LlmResponse
+  ) -> Optional[float]:
+    response_text = get_text_from_content(llm_response.content)
+    if response_text is None:
+      return None
+    label = _parse_critique(response_text)
+    if label == Label.VALID:
+      return 1.0
+    elif label == Label.INVALID:
+      return 0.0
+    else:
+      return None
+
+  @override
+  def aggregate_per_invocation_samples(
+      self,
+      per_invocation_samples: list[PerInvocationResult],
+  ) -> PerInvocationResult:
+    """Aggregates samples of per-invocation results by taking majority vote.
+
+    Only consider results that were successfully evaluated. In the case of a
+    tie, consider the result to be invalid.
+
+    Args:
+      per_invocation_samples: Samples of per-invocation results to
+        aggregate.
+
+    Returns:
+      If there is a majority of valid results, return the first valid result.
+      Otherwise, return the first invalid result. If no results were
+      successfully evaluated, return the first sample.
+    """
+    positive_results = []
+    negative_results = []
+    for result in per_invocation_samples:
+      if result.score == 1.0:
+        positive_results.append(result)
+      elif result.score == 0.0:
+        negative_results.append(result)
+    # If no results were successfully evaluated, just return the first sample.
+    if not positive_results and not negative_results:
+      return per_invocation_samples[0]
+    elif len(positive_results) > len(negative_results):
+      return positive_results[0]
+    else:
+      return negative_results[0]
+
+  @override
+  def aggregate_invocation_results(
+      self, per_invocation_results: list[PerInvocationResult]
+  ) -> EvaluationResult:
+    """Computes the fraction of invocation results that are valid."""
+    num_valid = 0
+    num_evaluated = 0
+    for result in per_invocation_results:
+      if result.score is None or result.eval_status == EvalStatus.NOT_EVALUATED:
+        continue
+      num_evaluated += 1
+      num_valid += result.score
+    overall_score = num_valid / num_evaluated
+    return EvaluationResult(
+        overall_score=overall_score,
+        overall_eval_status=get_eval_status(
+            overall_score, self._eval_metric.threshold
+        ),
+        per_invocation_results=per_invocation_results,
+    )
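Editor's note: the class docstring above describes the scoring scheme of the new evaluator: each judge sample is parsed to a valid/invalid label, samples for one invocation are combined by majority vote (ties count as invalid), and the overall score is the fraction of valid invocations. The standalone sketch below is not part of the diff; it uses simplified stand-in helpers rather than the ADK result types, and only the label regex is taken verbatim from the new module.

import re
from typing import Optional

# Regex copied from final_response_match_v2.py; everything else is illustrative.
_LABEL_RE = re.compile(
    r'"is_the_agent_response_valid":\s*\[*[\n\s]*"*([^"^\]^\s]*)"*[\n\s]*\]*\s*[,\n\}]'
)


def judge_response_to_score(response_text: str) -> Optional[float]:
  """Maps a raw judge response to 1.0 (valid), 0.0 (invalid), or None (no label found)."""
  match = _LABEL_RE.search(response_text)
  if not match:
    return None
  label = match.group(1).lower()
  if label in ("valid", "true"):
    return 1.0
  if label in ("invalid", "false", "almost", "partially_valid", "partially"):
    return 0.0
  return None


def majority_vote(sample_scores: list[Optional[float]]) -> Optional[float]:
  """Majority vote over judge samples; a tie counts as invalid, all-None as unscored."""
  valid = [s for s in sample_scores if s == 1.0]
  invalid = [s for s in sample_scores if s == 0.0]
  if not valid and not invalid:
    return None
  return 1.0 if len(valid) > len(invalid) else 0.0


# Three judge samples for a single invocation: two valid, one invalid.
samples = [
    judge_response_to_score('{"reasoning": ["ok"], "is_the_agent_response_valid": ["valid"]}'),
    judge_response_to_score('{"reasoning": ["ok"], "is_the_agent_response_valid": ["valid"]}'),
    judge_response_to_score('{"reasoning": ["unit mismatch"], "is_the_agent_response_valid": ["invalid"]}'),
]
print(majority_vote(samples))  # 1.0 -> this invocation counts toward the valid fraction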
google/adk/evaluation/llm_as_judge.py
@@ -0,0 +1,141 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from abc import abstractmethod
+from typing import Optional
+
+from google.genai import types as genai_types
+from typing_extensions import override
+
+from ..models.base_llm import BaseLlm
+from ..models.llm_request import LlmRequest
+from ..models.llm_response import LlmResponse
+from ..models.registry import LLMRegistry
+from .eval_case import Invocation
+from .eval_metrics import EvalMetric
+from .evaluator import EvaluationResult
+from .evaluator import Evaluator
+from .evaluator import PerInvocationResult
+from .llm_as_judge_utils import get_eval_status
+
+
+class LlmAsJudge(Evaluator):
+  """Evaluator based on a LLM.
+
+  It is meant to be extended by specific auto-raters for different evaluation
+  tasks:
+  - Provide the prompt template, and implement format_auto_rater_prompt to
+    format the auto-rater prompt for a given invocation.
+  - Implement convert_auto_rater_response_to_score to parse the auto-rater
+    response and return the corresponding score.
+  - Implement aggregate_invocation_results to aggregate the per-invocation
+    results to get the overall score.
+  - (Optional) Override aggregate_per_invocation_result_samples to aggregate
+    multiple auto-rater samples of the same invocation.
+  """
+
+  def __init__(
+      self,
+      eval_metric: EvalMetric,
+  ):
+    self._eval_metric = eval_metric
+    if not eval_metric.judge_model_options:
+      raise ValueError("Judge model options is required for LlmAsJudge.")
+    self._judge_model_options = eval_metric.judge_model_options
+    if self._judge_model_options.judge_model_config is None:
+      self._judge_model_options.judge_model_config = (
+          genai_types.GenerateContentConfig()
+      )
+    self._judge_model = self._setup_auto_rater()
+
+  @abstractmethod
+  def format_auto_rater_prompt(
+      self, actual: Invocation, expected: Invocation
+  ) -> str:
+    """Formats the auto-rater prompt to evaluate the given invocation."""
+
+  @abstractmethod
+  def convert_auto_rater_response_to_score(
+      self, auto_rater_response: LlmResponse
+  ) -> Optional[float]:
+    """Parses auto_rater_response and returns the corresponding score, or None if the score cannot be determined."""
+
+  @abstractmethod
+  def aggregate_per_invocation_samples(
+      self,
+      per_invocation_samples: list[PerInvocationResult],
+  ) -> PerInvocationResult:
+    """Aggregates repeated per-invocation samples to get the final result for the invocation."""
+
+  @abstractmethod
+  def aggregate_invocation_results(
+      self,
+      per_invocation_results: list[PerInvocationResult],
+  ) -> EvaluationResult:
+    """Aggregates the per invocation results to get the overall score."""
+
+  @override
+  async def evaluate_invocations(
+      self,
+      actual_invocations: list[Invocation],
+      expected_invocations: list[Invocation],
+  ) -> EvaluationResult:
+    per_invocation_results = []
+    for actual, expected in zip(actual_invocations, expected_invocations):
+      auto_rater_prompt = self.format_auto_rater_prompt(actual, expected)
+      llm_request = LlmRequest(
+          model=self._judge_model_options.judge_model,
+          contents=[
+              genai_types.Content(
+                  parts=[genai_types.Part(text=auto_rater_prompt)],
+                  role="user",
+              )
+          ],
+          config=self._judge_model_options.judge_model_config,
+      )
+      num_samples = self._judge_model_options.num_samples
+      invocation_result_samples = []
+      for _ in range(num_samples):
+        async for llm_response in self._judge_model.generate_content_async(
+            llm_request
+        ):
+          # Non-streaming call, so there is only one response content.
+          score = self.convert_auto_rater_response_to_score(llm_response)
+          invocation_result_samples.append(
+              PerInvocationResult(
+                  actual_invocation=actual,
+                  expected_invocation=expected,
+                  score=score,
+                  eval_status=get_eval_status(
+                      score, self._eval_metric.threshold
+                  ),
+              )
+          )
+      if not invocation_result_samples:
+        continue
+      per_invocation_results.append(
+          self.aggregate_per_invocation_samples(invocation_result_samples)
+      )
+
+    if per_invocation_results:
+      return self.aggregate_invocation_results(per_invocation_results)
+    return EvaluationResult()
+
+  def _setup_auto_rater(self) -> BaseLlm:
+    model_id = self._judge_model_options.judge_model
+    llm_registry = LLMRegistry()
+    llm_class = llm_registry.resolve(model_id)
+    return llm_class(model=model_id)
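Editor's note: LlmAsJudge leaves four hooks to subclasses: format the auto-rater prompt, convert a judge response to a score, aggregate repeated samples of one invocation, and aggregate across invocations. The sketch below is not part of the diff; it reduces that contract to plain synchronous Python, with a callable standing in for the judge model and floats standing in for the ADK result types, so the control flow of evaluate_invocations is easier to follow.

from abc import ABC, abstractmethod
from statistics import mean
from typing import Callable, Optional

JudgeFn = Callable[[str], str]  # prompt -> raw judge response text


class MiniLlmAsJudge(ABC):
  """Simplified stand-in for the LlmAsJudge contract."""

  def __init__(self, judge: JudgeFn, num_samples: int = 5):
    self._judge = judge
    self._num_samples = num_samples

  @abstractmethod
  def format_auto_rater_prompt(self, actual: str, expected: str) -> str: ...

  @abstractmethod
  def convert_auto_rater_response_to_score(self, response: str) -> Optional[float]: ...

  @abstractmethod
  def aggregate_per_invocation_samples(
      self, samples: list[Optional[float]]
  ) -> Optional[float]: ...

  @abstractmethod
  def aggregate_invocation_results(self, results: list[Optional[float]]) -> float: ...

  def evaluate_invocations(self, actual: list[str], expected: list[str]) -> float:
    per_invocation = []
    for a, e in zip(actual, expected):
      prompt = self.format_auto_rater_prompt(a, e)
      # Sample the judge num_samples times and score each raw response.
      samples = [
          self.convert_auto_rater_response_to_score(self._judge(prompt))
          for _ in range(self._num_samples)
      ]
      per_invocation.append(self.aggregate_per_invocation_samples(samples))
    return self.aggregate_invocation_results(per_invocation)


class KeywordJudgeEvaluator(MiniLlmAsJudge):
  """Toy concrete rater: scores 1.0 when the judge answers 'valid'."""

  def format_auto_rater_prompt(self, actual, expected):
    return f"Is {actual!r} equivalent to {expected!r}? Answer valid or invalid."

  def convert_auto_rater_response_to_score(self, response):
    return 1.0 if response.strip().lower() == "valid" else 0.0

  def aggregate_per_invocation_samples(self, samples):
    scored = [s for s in samples if s is not None]
    return float(mean(scored) > 0.5) if scored else None  # tie -> invalid

  def aggregate_invocation_results(self, results):
    scored = [r for r in results if r is not None]
    return sum(scored) / len(scored) if scored else 0.0


rater = KeywordJudgeEvaluator(judge=lambda prompt: "valid", num_samples=3)
print(rater.evaluate_invocations(["4"], ["four"]))  # 1.0

The real FinalResponseMatchV2Evaluator fills these hooks with the judge prompt, the label-parsing regex, majority vote, and the fraction-of-valid aggregation shown in the first hunk.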
google/adk/evaluation/llm_as_judge_utils.py
@@ -0,0 +1,48 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import enum
+from typing import Optional
+
+from google.genai import types as genai_types
+
+from .evaluator import EvalStatus
+
+
+@enum.unique
+class Label(enum.Enum):
+  """Labels for auto rater response."""
+
+  TRUE = "true"
+  INVALID = "invalid"
+  VALID = "valid"
+  PARTIALLY_VALID = "partially_valid", "partially valid", "partially"
+  ALMOST = "almost"
+  FALSE = "false"
+  NOT_FOUND = "label field not found"
+
+
+def get_text_from_content(
+    content: Optional[genai_types.Content],
+) -> Optional[str]:
+  if content and content.parts:
+    return "\n".join([p.text for p in content.parts if p.text])
+
+
+def get_eval_status(score: Optional[float], threshold: float) -> EvalStatus:
+  if score is None:
+    return EvalStatus.NOT_EVALUATED
+  return EvalStatus.PASSED if score >= threshold else EvalStatus.FAILED
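Editor's note: one detail worth calling out in Label is that assigning a comma-separated sequence to PARTIALLY_VALID makes that member's value a tuple rather than a string, which is why _parse_critique in the first hunk unpacks it with *Label.PARTIALLY_VALID.value. A quick standalone check, not part of the diff:

import enum


class Label(enum.Enum):
  VALID = "valid"
  PARTIALLY_VALID = "partially_valid", "partially valid", "partially"


print(Label.VALID.value)            # 'valid'
print(Label.PARTIALLY_VALID.value)  # ('partially_valid', 'partially valid', 'partially')

# Membership over every accepted spelling therefore needs the tuple flattened:
accepted = [Label.VALID.value, *Label.PARTIALLY_VALID.value]
print("partially" in accepted)      # True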
google/adk/evaluation/metric_evaluator_registry.py
@@ -0,0 +1,89 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import logging
+
+from ..errors.not_found_error import NotFoundError
+from .eval_metrics import EvalMetric
+from .eval_metrics import MetricName
+from .eval_metrics import PrebuiltMetrics
+from .evaluator import Evaluator
+from .response_evaluator import ResponseEvaluator
+from .trajectory_evaluator import TrajectoryEvaluator
+
+logger = logging.getLogger("google_adk." + __name__)
+
+
+class MetricEvaluatorRegistry:
+  """A registry for metric Evaluators."""
+
+  _registry: dict[str, type[Evaluator]] = {}
+
+  def get_evaluator(self, eval_metric: EvalMetric) -> Evaluator:
+    """Returns an Evaluator for the given metric.
+
+    A new instance of the Evaluator is returned.
+
+    Args:
+      eval_metric: The metric for which we need the Evaluator.
+
+    Raises:
+      NotFoundError: If there is no evaluator for the metric.
+    """
+    if eval_metric.metric_name not in self._registry:
+      raise NotFoundError(f"{eval_metric.metric_name} not found in registry.")
+
+    return self._registry[eval_metric.metric_name](eval_metric=eval_metric)
+
+  def register_evaluator(
+      self, metric_name: MetricName, evaluator: type[Evaluator]
+  ):
+    """Registers an evaluator given the metric name.
+
+    If a mapping already exist, then it is updated.
+    """
+    if metric_name in self._registry:
+      logger.info(
+          "Updating Evaluator class for %s from %s to %s",
+          metric_name,
+          self._registry[metric_name],
+          evaluator,
+      )
+
+    self._registry[str(metric_name)] = evaluator
+
+
+def _get_default_metric_evaluator_registry() -> MetricEvaluatorRegistry:
+  """Returns an instance of MetricEvaluatorRegistry with standard metrics already registered in it."""
+  metric_evaluator_registry = MetricEvaluatorRegistry()
+
+  metric_evaluator_registry.register_evaluator(
+      metric_name=PrebuiltMetrics.TOOL_TRAJECTORY_AVG_SCORE,
+      evaluator=type(TrajectoryEvaluator),
+  )
+  metric_evaluator_registry.register_evaluator(
+      metric_name=PrebuiltMetrics.RESPONSE_EVALUATION_SCORE,
+      evaluator=type(ResponseEvaluator),
+  )
+  metric_evaluator_registry.register_evaluator(
+      metric_name=PrebuiltMetrics.RESPONSE_MATCH_SCORE,
+      evaluator=type(ResponseEvaluator),
+  )
+
+  return metric_evaluator_registry
+
+
+DEFAULT_METRIC_EVALUATOR_REGISTRY = _get_default_metric_evaluator_registry()
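Editor's note: metric_evaluator_registry.py maps metric names to Evaluator classes and constructs a fresh evaluator instance on every lookup. The standalone sketch below is not part of the diff; it shows the same register/lookup pattern with stand-in classes so it runs without the ADK, whereas the real module keys the registry on the prebuilt metric names and raises NotFoundError for unknown metrics.

from dataclasses import dataclass
from typing import Type


@dataclass
class EvalMetric:
  metric_name: str
  threshold: float


class Evaluator:
  def __init__(self, eval_metric: EvalMetric):
    self.eval_metric = eval_metric


class ToolTrajectoryEvaluator(Evaluator):
  """Stand-in for a concrete metric evaluator."""


class MetricEvaluatorRegistry:
  def __init__(self):
    self._registry: dict[str, Type[Evaluator]] = {}

  def register_evaluator(self, metric_name: str, evaluator: Type[Evaluator]) -> None:
    # Re-registering a name simply overwrites the previous mapping.
    self._registry[metric_name] = evaluator

  def get_evaluator(self, eval_metric: EvalMetric) -> Evaluator:
    if eval_metric.metric_name not in self._registry:
      raise KeyError(f"{eval_metric.metric_name} not found in registry.")
    # A new instance is constructed for every lookup.
    return self._registry[eval_metric.metric_name](eval_metric=eval_metric)


registry = MetricEvaluatorRegistry()
registry.register_evaluator("tool_trajectory_avg_score", ToolTrajectoryEvaluator)
evaluator = registry.get_evaluator(EvalMetric("tool_trajectory_avg_score", threshold=0.8))
print(type(evaluator).__name__)  # ToolTrajectoryEvaluator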