google-adk 0.5.0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (113)
  1. google/adk/agents/base_agent.py +76 -30
  2. google/adk/agents/base_agent.py.orig +330 -0
  3. google/adk/agents/callback_context.py +0 -5
  4. google/adk/agents/llm_agent.py +122 -30
  5. google/adk/agents/loop_agent.py +1 -1
  6. google/adk/agents/parallel_agent.py +7 -0
  7. google/adk/agents/readonly_context.py +7 -1
  8. google/adk/agents/run_config.py +1 -1
  9. google/adk/agents/sequential_agent.py +31 -0
  10. google/adk/agents/transcription_entry.py +4 -2
  11. google/adk/artifacts/gcs_artifact_service.py +1 -1
  12. google/adk/artifacts/in_memory_artifact_service.py +1 -1
  13. google/adk/auth/auth_credential.py +6 -1
  14. google/adk/auth/auth_preprocessor.py +7 -1
  15. google/adk/auth/auth_tool.py +3 -4
  16. google/adk/cli/agent_graph.py +5 -5
  17. google/adk/cli/browser/index.html +2 -2
  18. google/adk/cli/browser/{main-ULN5R5I5.js → main-QOEMUXM4.js} +44 -45
  19. google/adk/cli/cli.py +7 -7
  20. google/adk/cli/cli_deploy.py +7 -2
  21. google/adk/cli/cli_eval.py +172 -99
  22. google/adk/cli/cli_tools_click.py +147 -64
  23. google/adk/cli/fast_api.py +330 -148
  24. google/adk/cli/fast_api.py.orig +174 -80
  25. google/adk/cli/utils/common.py +23 -0
  26. google/adk/cli/utils/evals.py +83 -1
  27. google/adk/cli/utils/logs.py +13 -5
  28. google/adk/code_executors/__init__.py +3 -1
  29. google/adk/code_executors/built_in_code_executor.py +52 -0
  30. google/adk/evaluation/__init__.py +1 -1
  31. google/adk/evaluation/agent_evaluator.py +168 -128
  32. google/adk/evaluation/eval_case.py +102 -0
  33. google/adk/evaluation/eval_set.py +37 -0
  34. google/adk/evaluation/eval_sets_manager.py +42 -0
  35. google/adk/evaluation/evaluation_generator.py +88 -113
  36. google/adk/evaluation/evaluator.py +56 -0
  37. google/adk/evaluation/local_eval_sets_manager.py +264 -0
  38. google/adk/evaluation/response_evaluator.py +106 -2
  39. google/adk/evaluation/trajectory_evaluator.py +83 -2
  40. google/adk/events/event.py +6 -1
  41. google/adk/events/event_actions.py +6 -1
  42. google/adk/examples/example_util.py +3 -2
  43. google/adk/flows/llm_flows/_code_execution.py +9 -1
  44. google/adk/flows/llm_flows/audio_transcriber.py +4 -3
  45. google/adk/flows/llm_flows/base_llm_flow.py +54 -15
  46. google/adk/flows/llm_flows/functions.py +9 -8
  47. google/adk/flows/llm_flows/instructions.py +13 -5
  48. google/adk/flows/llm_flows/single_flow.py +1 -1
  49. google/adk/memory/__init__.py +1 -1
  50. google/adk/memory/_utils.py +23 -0
  51. google/adk/memory/base_memory_service.py +23 -21
  52. google/adk/memory/base_memory_service.py.orig +76 -0
  53. google/adk/memory/in_memory_memory_service.py +57 -25
  54. google/adk/memory/memory_entry.py +37 -0
  55. google/adk/memory/vertex_ai_rag_memory_service.py +38 -15
  56. google/adk/models/anthropic_llm.py +16 -9
  57. google/adk/models/gemini_llm_connection.py +11 -11
  58. google/adk/models/google_llm.py +9 -2
  59. google/adk/models/google_llm.py.orig +305 -0
  60. google/adk/models/lite_llm.py +77 -21
  61. google/adk/models/llm_response.py +14 -2
  62. google/adk/models/registry.py +1 -1
  63. google/adk/runners.py +65 -41
  64. google/adk/sessions/__init__.py +1 -1
  65. google/adk/sessions/base_session_service.py +6 -33
  66. google/adk/sessions/database_session_service.py +58 -65
  67. google/adk/sessions/in_memory_session_service.py +106 -24
  68. google/adk/sessions/session.py +3 -0
  69. google/adk/sessions/vertex_ai_session_service.py +23 -45
  70. google/adk/telemetry.py +3 -0
  71. google/adk/tools/__init__.py +4 -7
  72. google/adk/tools/{built_in_code_execution_tool.py → _built_in_code_execution_tool.py} +11 -0
  73. google/adk/tools/_memory_entry_utils.py +30 -0
  74. google/adk/tools/agent_tool.py +9 -9
  75. google/adk/tools/apihub_tool/apihub_toolset.py +55 -74
  76. google/adk/tools/application_integration_tool/application_integration_toolset.py +107 -85
  77. google/adk/tools/application_integration_tool/clients/connections_client.py +20 -0
  78. google/adk/tools/application_integration_tool/clients/integration_client.py +6 -6
  79. google/adk/tools/application_integration_tool/integration_connector_tool.py +69 -26
  80. google/adk/tools/base_toolset.py +58 -0
  81. google/adk/tools/enterprise_search_tool.py +65 -0
  82. google/adk/tools/function_parameter_parse_util.py +2 -2
  83. google/adk/tools/google_api_tool/__init__.py +18 -70
  84. google/adk/tools/google_api_tool/google_api_tool.py +11 -5
  85. google/adk/tools/google_api_tool/google_api_toolset.py +126 -0
  86. google/adk/tools/google_api_tool/google_api_toolsets.py +102 -0
  87. google/adk/tools/google_api_tool/googleapi_to_openapi_converter.py +40 -42
  88. google/adk/tools/langchain_tool.py +96 -49
  89. google/adk/tools/load_memory_tool.py +14 -5
  90. google/adk/tools/mcp_tool/__init__.py +3 -2
  91. google/adk/tools/mcp_tool/mcp_session_manager.py +153 -16
  92. google/adk/tools/mcp_tool/mcp_session_manager.py.orig +322 -0
  93. google/adk/tools/mcp_tool/mcp_tool.py +12 -12
  94. google/adk/tools/mcp_tool/mcp_toolset.py +155 -195
  95. google/adk/tools/openapi_tool/openapi_spec_parser/openapi_toolset.py +32 -7
  96. google/adk/tools/openapi_tool/openapi_spec_parser/operation_parser.py +31 -31
  97. google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py +1 -1
  98. google/adk/tools/preload_memory_tool.py +27 -18
  99. google/adk/tools/retrieval/__init__.py +1 -1
  100. google/adk/tools/retrieval/vertex_ai_rag_retrieval.py +1 -1
  101. google/adk/tools/toolbox_toolset.py +79 -0
  102. google/adk/tools/transfer_to_agent_tool.py +0 -1
  103. google/adk/version.py +1 -1
  104. {google_adk-0.5.0.dist-info → google_adk-1.0.0.dist-info}/METADATA +7 -5
  105. google_adk-1.0.0.dist-info/RECORD +195 -0
  106. google/adk/agents/remote_agent.py +0 -50
  107. google/adk/tools/google_api_tool/google_api_tool_set.py +0 -110
  108. google/adk/tools/google_api_tool/google_api_tool_sets.py +0 -112
  109. google/adk/tools/toolbox_tool.py +0 -46
  110. google_adk-0.5.0.dist-info/RECORD +0 -180
  111. {google_adk-0.5.0.dist-info → google_adk-1.0.0.dist-info}/WHEEL +0 -0
  112. {google_adk-0.5.0.dist-info → google_adk-1.0.0.dist-info}/entry_points.txt +0 -0
  113. {google_adk-0.5.0.dist-info → google_adk-1.0.0.dist-info}/licenses/LICENSE +0 -0
google/adk/code_executors/built_in_code_executor.py (new file)
@@ -0,0 +1,52 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from google.genai import types
+ from typing_extensions import override
+
+ from ..agents.invocation_context import InvocationContext
+ from ..models import LlmRequest
+ from .base_code_executor import BaseCodeExecutor
+ from .code_execution_utils import CodeExecutionInput
+ from .code_execution_utils import CodeExecutionResult
+
+
+ class BuiltInCodeExecutor(BaseCodeExecutor):
+   """A code executor that uses the Model's built-in code executor.
+
+   Currently only supports Gemini 2.0+ models, but will be expanded to
+   other models.
+   """
+
+   @override
+   def execute_code(
+       self,
+       invocation_context: InvocationContext,
+       code_execution_input: CodeExecutionInput,
+   ) -> CodeExecutionResult:
+     pass
+
+   def process_llm_request(self, llm_request: LlmRequest) -> None:
+     """Pre-process the LLM request for Gemini 2.0+ models to use the code execution tool."""
+     if llm_request.model and llm_request.model.startswith("gemini-2"):
+       llm_request.config = llm_request.config or types.GenerateContentConfig()
+       llm_request.config.tools = llm_request.config.tools or []
+       llm_request.config.tools.append(
+           types.Tool(code_execution=types.ToolCodeExecution())
+       )
+       return
+     raise ValueError(
+         "Gemini code execution tool is not supported for model"
+         f" {llm_request.model}"
+     )
google/adk/evaluation/__init__.py
@@ -14,7 +14,7 @@

  import logging

- logger = logging.getLogger(__name__)
+ logger = logging.getLogger('google_adk.' + __name__)

  __all__ = []

google/adk/evaluation/agent_evaluator.py
@@ -13,16 +13,30 @@
  # limitations under the License.

  import json
+ import logging
  import os
  from os import path
+ from typing import Any
  from typing import Dict
  from typing import List
+ from typing import Optional
  from typing import Union
+ import uuid

+ from pydantic import ValidationError
+
+ from .eval_set import EvalSet
  from .evaluation_generator import EvaluationGenerator
+ from .evaluator import EvalStatus
+ from .evaluator import EvaluationResult
+ from .evaluator import Evaluator
+ from .local_eval_sets_manager import convert_eval_set_to_pydanctic_schema
  from .response_evaluator import ResponseEvaluator
  from .trajectory_evaluator import TrajectoryEvaluator

+ logger = logging.getLogger("google_adk." + __name__)
+
+
  # Constants for default runs and evaluation criteria
  NUM_RUNS = 2
  TOOL_TRAJECTORY_SCORE_KEY = "tool_trajectory_avg_score"
@@ -76,12 +90,67 @@ class AgentEvaluator:
      return DEFAULT_CRITERIA

    @staticmethod
-   async def evaluate(
-       agent_module,
-       eval_dataset_file_path_or_dir,
+   async def evaluate_eval_set(
+       agent_module: str,
+       eval_set: EvalSet,
+       criteria: dict[str, float],
        num_runs=NUM_RUNS,
        agent_name=None,
-       initial_session_file=None,
+   ):
+     """Evaluates an agent using the given EvalSet.
+
+     Args:
+       agent_module: The path to python module that contains the definition of
+         the agent. There is convention in place here, where the code is going
+         to look for 'root_agent' in the loaded module.
+       eval_set: The eval set.
+       criteria: Evauation criterias, a dictionary of metric names to their
+         respective thresholds.
+       num_runs: Number of times all entries in the eval dataset should be
+         assessed.
+       agent_name: The name of the agent.
+     """
+     eval_case_responses_list = await EvaluationGenerator.generate_responses(
+         eval_set=eval_set,
+         agent_module_path=agent_module,
+         repeat_num=num_runs,
+         agent_name=agent_name,
+     )
+
+     for eval_case_responses in eval_case_responses_list:
+       actual_invocations = [
+           invocation
+           for invocations in eval_case_responses.responses
+           for invocation in invocations
+       ]
+       expected_invocations = (
+           eval_case_responses.eval_case.conversation * num_runs
+       )
+
+       for metric_name, threshold in criteria.items():
+         metric_evaluator = AgentEvaluator._get_metric_evaluator(
+             metric_name=metric_name, threshold=threshold
+         )
+
+         evaluation_result: EvaluationResult = (
+             metric_evaluator.evaluate_invocations(
+                 actual_invocations=actual_invocations,
+                 expected_invocations=expected_invocations,
+             )
+         )
+
+         assert evaluation_result.overall_eval_status == EvalStatus.PASSED, (
+             f"{metric_name} for {agent_module} Failed. Expected {threshold},"
+             f" but got {evaluation_result.overall_score}."
+         )
+
+   @staticmethod
+   async def evaluate(
+       agent_module: str,
+       eval_dataset_file_path_or_dir: str,
+       num_runs: int = NUM_RUNS,
+       agent_name: Optional[str] = None,
+       initial_session_file: Optional[str] = None,
    ):
      """Evaluates an Agent given eval data.

@@ -109,35 +178,102 @@ class AgentEvaluator:
      else:
        test_files = [eval_dataset_file_path_or_dir]

-     initial_session_state = {}
-     if initial_session_file:
-       with open(initial_session_file, "r") as f:
-         initial_session_state = json.loads(f.read())["state"]
+     initial_session = AgentEvaluator._get_initial_session(initial_session_file)

      for test_file in test_files:
-       dataset = AgentEvaluator._load_dataset(test_file)[0]
        criteria = AgentEvaluator.find_config_for_test_file(test_file)
+       eval_set = AgentEvaluator._load_eval_set_from_file(
+           test_file, criteria, initial_session
+       )

-       AgentEvaluator._validate_input([dataset], criteria)
-
-       evaluation_response = await AgentEvaluator._generate_responses(
-           agent_module,
-           [dataset],
-           num_runs,
+       await AgentEvaluator.evaluate_eval_set(
+           agent_module=agent_module,
+           eval_set=eval_set,
+           criteria=criteria,
+           num_runs=num_runs,
            agent_name=agent_name,
-           initial_session={"state": initial_session_state},
        )

-       if AgentEvaluator._response_evaluation_required(criteria, [dataset]):
-         AgentEvaluator._evaluate_response_scores(
-             agent_module, evaluation_response, criteria
-         )
+   @staticmethod
+   def migrate_eval_data_to_new_schema(
+       old_eval_data_file: str,
+       new_eval_data_file: str,
+       initial_session_file: Optional[str] = None,
+   ):
+     """A utility for migrating eval data to new schema backed by EvalSet."""
+     if not old_eval_data_file or not new_eval_data_file:
+       raise ValueError(
+           "One of old_eval_data_file or new_eval_data_file is empty."
+       )
+
+     criteria = AgentEvaluator.find_config_for_test_file(old_eval_data_file)
+     initial_session = AgentEvaluator._get_initial_session(initial_session_file)
+
+     eval_set = AgentEvaluator._get_eval_set_from_old_format(
+         old_eval_data_file, criteria, initial_session
+     )

-       if AgentEvaluator._trajectory_evaluation_required(criteria, [dataset]):
-         AgentEvaluator._evaluate_tool_trajectory(
-             agent_module, evaluation_response, criteria
+     with open(new_eval_data_file, "w") as f:
+       f.write(eval_set.model_dump_json(indent=2))
+
+   @staticmethod
+   def _load_eval_set_from_file(
+       eval_set_file: str,
+       criteria: dict[str, float],
+       initial_session: dict[str, Any],
+   ) -> EvalSet:
+     """Loads an EvalSet from the given file."""
+     if os.path.isfile(eval_set_file):
+       with open(eval_set_file, "r", encoding="utf-8") as f:
+         content = f.read()
+
+       try:
+         eval_set = EvalSet.model_validate_json(content)
+         assert len(initial_session) == 0, (
+             "Intial session should be specified as a part of EvalSet file."
+             " Explicit initial session is only needed, when specifying data in"
+             " the older schema."
+         )
+         return eval_set
+       except ValidationError:
+         # We assume that the eval data was specified in the old format
+         logger.warning(
+             f"Contents of {eval_set_file} appear to be in older format.To avoid"
+             " this warning, please update your test files to contain data in"
+             " EvalSet schema. You can use `migrate_eval_data_to_new_schema`"
+             " for migrating your old test files."
          )

+     # If we are here, the data must be specified in the older format.
+     return AgentEvaluator._get_eval_set_from_old_format(
+         eval_set_file, criteria, initial_session
+     )
+
+   @staticmethod
+   def _get_eval_set_from_old_format(
+       eval_set_file: str,
+       criteria: dict[str, float],
+       initial_session: dict[str, Any],
+   ) -> EvalSet:
+     data = AgentEvaluator._load_dataset(eval_set_file)[0]
+     AgentEvaluator._validate_input([data], criteria)
+     eval_data = {
+         "name": eval_set_file,
+         "data": data,
+         "initial_session": initial_session,
+     }
+     return convert_eval_set_to_pydanctic_schema(
+         eval_set_id=str(uuid.uuid4()), eval_set_in_json_format=[eval_data]
+     )
+
+   @staticmethod
+   def _get_initial_session(initial_session_file: Optional[str] = None):
+     initial_session = {}
+     if initial_session_file:
+       with open(initial_session_file, "r") as f:
+         initial_session = json.loads(f.read())
+     return initial_session
+
    @staticmethod
    def _load_dataset(
        input_data: Union[str, List[str], List[Dict], List[List[Dict]]],
@@ -221,109 +357,13 @@ class AgentEvaluator:
        )

    @staticmethod
-   def _get_infer_criteria(eval_dataset):
-     """Infers evaluation criteria based on the provided dataset.
-
-     Args:
-       eval_dataset (list): A list of evaluation samples.
-
-     Returns:
-       dict: Inferred evaluation criteria based on dataset fields.
-     """
-     inferred_criteria = {}
-     sample = eval_dataset[0][0]
-
-     if QUERY_COLUMN in sample and EXPECTED_TOOL_USE_COLUMN in sample:
-       inferred_criteria[TOOL_TRAJECTORY_SCORE_KEY] = DEFAULT_CRITERIA[
-           TOOL_TRAJECTORY_SCORE_KEY
-       ]
-
-     if QUERY_COLUMN in sample and REFERENCE_COLUMN in sample:
-       inferred_criteria[RESPONSE_MATCH_SCORE_KEY] = DEFAULT_CRITERIA[
-           RESPONSE_MATCH_SCORE_KEY
-       ]
-
-     return inferred_criteria
-
-   @staticmethod
-   async def _generate_responses(
-       agent_module, eval_dataset, num_runs, agent_name=None, initial_session={}
-   ):
-     """Generates evaluation responses by running the agent module multiple times."""
-     return EvaluationGenerator.generate_responses(
-         eval_dataset,
-         agent_module,
-         repeat_num=num_runs,
-         agent_name=agent_name,
-         initial_session=initial_session,
-     )
-
-   @staticmethod
-   def _generate_responses_from_session(eval_dataset, session_path):
-     """Generates evaluation responses by running the agent module multiple times."""
-     return EvaluationGenerator.generate_responses_from_session(
-         session_path, eval_dataset
-     )
-
-   @staticmethod
-   def _response_evaluation_required(criteria, eval_dataset):
-     """Checks if response evaluation are needed."""
-     return REFERENCE_COLUMN in eval_dataset[0][0] and any(
-         key in criteria
-         for key in [RESPONSE_EVALUATION_SCORE_KEY, RESPONSE_MATCH_SCORE_KEY]
-     )
-
-   @staticmethod
-   def _trajectory_evaluation_required(evaluation_criteria, eval_dataset):
-     """Checks if response evaluation are needed."""
-     return (
-         EXPECTED_TOOL_USE_COLUMN in eval_dataset[0][0]
-         and TOOL_TRAJECTORY_SCORE_KEY in evaluation_criteria
-     )
-
-   @staticmethod
-   def _evaluate_response_scores(agent_module, evaluation_response, criteria):
-     """Evaluates response scores and raises an assertion error if they don't meet the criteria."""
-     metrics = ResponseEvaluator.evaluate(
-         evaluation_response, criteria, print_detailed_results=True
-     )
-
-     AgentEvaluator._assert_score(
-         metrics,
-         "coherence/mean",
-         criteria.get(RESPONSE_EVALUATION_SCORE_KEY),
-         "Average response evaluation score",
-         agent_module,
-     )
-
-     AgentEvaluator._assert_score(
-         metrics,
-         "rouge_1/mean",
-         criteria.get(RESPONSE_MATCH_SCORE_KEY),
-         "Average response match score",
-         agent_module,
-     )
-
-   @staticmethod
-   def _evaluate_tool_trajectory(agent_module, evaluation_response, criteria):
-     """Evaluates tool trajectory scores and raises an assertion error if they don't meet the criteria."""
-     score = TrajectoryEvaluator.evaluate(
-         evaluation_response, print_detailed_results=True
-     )
-     AgentEvaluator._assert_score(
-         {TOOL_TRAJECTORY_SCORE_KEY: score},
-         TOOL_TRAJECTORY_SCORE_KEY,
-         criteria[TOOL_TRAJECTORY_SCORE_KEY],
-         "Average tool trajectory evaluation score",
-         agent_module,
-     )
+   def _get_metric_evaluator(metric_name: str, threshold: float) -> Evaluator:
+     if metric_name == TOOL_TRAJECTORY_SCORE_KEY:
+       return TrajectoryEvaluator(threshold=threshold)
+     elif (
+         metric_name == RESPONSE_MATCH_SCORE_KEY
+         or metric_name == RESPONSE_EVALUATION_SCORE_KEY
+     ):
+       return ResponseEvaluator(threshold=threshold, metric_name=metric_name)

-   @staticmethod
-   def _assert_score(metrics, metric_key, threshold, description, agent_module):
-     """Asserts that a metric meets the specified threshold."""
-     if metric_key in metrics:
-       actual_score = metrics[metric_key]
-       assert actual_score >= threshold, (
-           f"{description} for {agent_module} is lower than expected. "
-           f"Expected >= {threshold}, but got {actual_score}."
-       )
+     raise ValueError(f"Unsupported eval metric: {metric_name}")
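The net effect of this refactor: AgentEvaluator.evaluate is now a typed async entry point that loads each test file into an EvalSet and delegates to evaluate_eval_set, while per-metric scoring moves into pluggable Evaluator objects selected by _get_metric_evaluator. A sketch of the new call pattern, based only on the signatures above; the module path, file paths, and the pytest-asyncio runner are illustrative assumptions:

# Hedged sketch: evaluate() is now async, so tests need an async runner
# (pytest-asyncio shown here as one option). Paths/module names are placeholders.
import pytest
from google.adk.evaluation.agent_evaluator import AgentEvaluator

@pytest.mark.asyncio
async def test_my_agent():
    await AgentEvaluator.evaluate(
        agent_module="my_app.agent",  # module expected to expose root_agent
        eval_dataset_file_path_or_dir="tests/fixtures/my_agent.test.json",
        num_runs=2,
    )

# One-time migration of an old-format test file to the new EvalSet schema:
AgentEvaluator.migrate_eval_data_to_new_schema(
    old_eval_data_file="tests/fixtures/my_agent.test.json",
    new_eval_data_file="tests/fixtures/my_agent.evalset.json",
)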
google/adk/evaluation/eval_case.py (new file)
@@ -0,0 +1,102 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from typing import Any, Optional, Tuple
+
+ from google.genai import types as genai_types
+ from pydantic import alias_generators
+ from pydantic import BaseModel
+ from pydantic import ConfigDict
+ from pydantic import Field
+
+
+ class EvalBaseModel(BaseModel):
+   model_config = ConfigDict(
+       alias_generator=alias_generators.to_camel,
+       populate_by_name=True,
+   )
+
+
+ class IntermediateData(EvalBaseModel):
+   """Container for intermediate data that an agent would generate as it responds with a final answer."""
+
+   tool_uses: list[genai_types.FunctionCall] = []
+   """Tool use trajectory in chronological order."""
+
+   intermediate_responses: list[Tuple[str, list[genai_types.Part]]] = []
+   """Intermediate responses generated by sub-agents to convey progress or status
+   in a multi-agent system, distinct from the final response.
+
+   This is expressed as a Tuple of:
+     - Author: Usually the sub-agent name that generated the intermediate
+       response.
+
+     - A list of Parts that comprise of the response.
+   """
+
+
+ class Invocation(EvalBaseModel):
+   """Represents a single invocation."""
+
+   invocation_id: str = ''
+   """Unique identifier for the invocation."""
+
+   user_content: genai_types.Content
+   """Content provided by the user in this invocation."""
+
+   final_response: Optional[genai_types.Content] = None
+   """Final response from the agent."""
+
+   intermediate_data: Optional[IntermediateData] = None
+   """Intermediate steps generated as a part of Agent execution.
+
+   For a multi-agent system, it is also helpful to inspect the route that
+   the agent took to generate final response.
+   """
+
+   creation_timestamp: float = 0.0
+   """Timestamp for the current invocation, primarily intended for debugging purposes."""
+
+
+ class SessionInput(EvalBaseModel):
+   """Values that help initialize a Session."""
+
+   app_name: str
+   """The name of the app."""
+
+   user_id: str
+   """The user id."""
+
+   state: dict[str, Any] = Field(default_factory=dict)
+   """The state of the session."""
+
+
+ class EvalCase(EvalBaseModel):
+   """An eval case."""
+
+   eval_id: str
+   """Unique identifier for the evaluation case."""
+
+   conversation: list[Invocation]
+   """A conversation between the user and the Agent. The conversation can have any number of invocations."""
+
+   session_input: Optional[SessionInput] = None
+   """Session input that will be passed on to the Agent during eval.
+   It is common for Agents state to be initialized to some initial/default value,
+   for example, your agent may need to know today's date.
+   """
+
+   creation_timestamp: float = 0.0
+   """The time at which this eval case was created."""
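These pydantic models are the backbone of the new eval schema. A small construction sketch based on the fields above, with all values illustrative; note that EvalBaseModel applies to_camel aliases, so serialized JSON uses camelCase keys such as invocationId:

# Illustrative construction of an EvalCase; field values are placeholders.
from google.genai import types as genai_types
from google.adk.evaluation.eval_case import EvalCase, Invocation

case = EvalCase(
    eval_id="roll_a_die",
    conversation=[
        Invocation(
            user_content=genai_types.Content(
                role="user", parts=[genai_types.Part(text="Roll a d6.")]
            ),
            final_response=genai_types.Content(
                role="model", parts=[genai_types.Part(text="You rolled a 4.")]
            ),
        )
    ],
)
# by_alias=True emits the camelCase form (evalId, userContent, ...).
print(case.model_dump_json(by_alias=True, indent=2))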
google/adk/evaluation/eval_set.py (new file)
@@ -0,0 +1,37 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Optional
+ from pydantic import BaseModel
+ from .eval_case import EvalCase
+
+
+ class EvalSet(BaseModel):
+   """A set of eval cases."""
+
+   eval_set_id: str
+   """Unique identifier for the eval set."""
+
+   name: Optional[str] = None
+   """Name of the dataset."""
+
+   description: Optional[str] = None
+   """Description of the dataset."""
+
+   eval_cases: list[EvalCase]
+   """List of eval cases in the dataset. Each case represents a single
+   interaction to be evaluated."""
+
+   creation_timestamp: float = 0.0
+   """The time at which this eval set was created."""
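Putting the two models together: an EvalSet is the on-disk container that AgentEvaluator._load_eval_set_from_file parses and that migrate_eval_data_to_new_schema writes via model_dump_json(indent=2). A sketch continuing from the EvalCase built above, with the file name as an illustrative placeholder:

# Bundle cases into a set and persist it in the same JSON form the
# migration utility emits.
import time
from google.adk.evaluation.eval_set import EvalSet

eval_set = EvalSet(
    eval_set_id="smoke-tests",
    name="smoke-tests",
    eval_cases=[case],  # EvalCase from the previous sketch
    creation_timestamp=time.time(),
)
with open("smoke.evalset.json", "w") as f:
    f.write(eval_set.model_dump_json(indent=2))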
google/adk/evaluation/eval_sets_manager.py (new file)
@@ -0,0 +1,42 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from abc import ABC, abstractmethod
+
+ from .eval_case import EvalCase
+ from .eval_set import EvalSet
+
+
+ class EvalSetsManager(ABC):
+   """An interface to manage an Eval Sets."""
+
+   @abstractmethod
+   def get_eval_set(self, app_name: str, eval_set_id: str) -> EvalSet:
+     """Returns an EvalSet identified by an app_name and eval_set_id."""
+     raise NotImplementedError()
+
+   @abstractmethod
+   def create_eval_set(self, app_name: str, eval_set_id: str):
+     """Creates an empty EvalSet given the app_name and eval_set_id."""
+     raise NotImplementedError()
+
+   @abstractmethod
+   def list_eval_sets(self, app_name: str) -> list[str]:
+     """Returns a list of EvalSets that belong to the given app_name."""
+     raise NotImplementedError()
+
+   @abstractmethod
+   def add_eval_case(self, app_name: str, eval_set_id: str, eval_case: EvalCase):
+     """Adds the given EvalCase to an existing EvalSet identified by app_name and eval_set_id."""
+     raise NotImplementedError()
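The package's shipped implementation of this interface is LocalEvalSetsManager (item 37 in the file list). Purely as an illustration of the contract the ABC defines, a minimal in-memory variant might look like:

# Illustration only; not part of the package. Shows the shape a concrete
# EvalSetsManager must take.
from google.adk.evaluation.eval_case import EvalCase
from google.adk.evaluation.eval_set import EvalSet
from google.adk.evaluation.eval_sets_manager import EvalSetsManager


class InMemoryEvalSetsManager(EvalSetsManager):

  def __init__(self):
    # app_name -> eval_set_id -> EvalSet
    self._store: dict[str, dict[str, EvalSet]] = {}

  def get_eval_set(self, app_name: str, eval_set_id: str) -> EvalSet:
    return self._store[app_name][eval_set_id]

  def create_eval_set(self, app_name: str, eval_set_id: str):
    self._store.setdefault(app_name, {})[eval_set_id] = EvalSet(
        eval_set_id=eval_set_id, eval_cases=[]
    )

  def list_eval_sets(self, app_name: str) -> list[str]:
    return list(self._store.get(app_name, {}))

  def add_eval_case(self, app_name: str, eval_set_id: str, eval_case: EvalCase):
    self.get_eval_set(app_name, eval_set_id).eval_cases.append(eval_case)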