google-adk 1.7.0__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. google/adk/a2a/converters/request_converter.py +1 -2
  2. google/adk/a2a/logs/log_utils.py +1 -2
  3. google/adk/a2a/utils/__init__.py +0 -0
  4. google/adk/a2a/utils/agent_card_builder.py +544 -0
  5. google/adk/a2a/utils/agent_to_a2a.py +118 -0
  6. google/adk/agents/base_agent.py +6 -1
  7. google/adk/agents/config_schemas/AgentConfig.json +22 -0
  8. google/adk/agents/live_request_queue.py +15 -0
  9. google/adk/agents/llm_agent.py +11 -0
  10. google/adk/agents/loop_agent.py +6 -1
  11. google/adk/agents/remote_a2a_agent.py +2 -2
  12. google/adk/artifacts/gcs_artifact_service.py +86 -18
  13. google/adk/cli/browser/index.html +2 -2
  14. google/adk/cli/browser/{main-SRBSE46V.js → main-W7QZBYAR.js} +139 -139
  15. google/adk/cli/cli_eval.py +87 -12
  16. google/adk/cli/cli_tools_click.py +143 -82
  17. google/adk/cli/fast_api.py +136 -95
  18. google/adk/evaluation/eval_metrics.py +4 -0
  19. google/adk/evaluation/eval_sets_manager.py +5 -1
  20. google/adk/evaluation/final_response_match_v2.py +2 -2
  21. google/adk/evaluation/gcs_eval_sets_manager.py +2 -1
  22. google/adk/evaluation/local_eval_service.py +2 -2
  23. google/adk/evaluation/local_eval_set_results_manager.py +2 -2
  24. google/adk/evaluation/local_eval_sets_manager.py +1 -1
  25. google/adk/evaluation/metric_evaluator_registry.py +16 -6
  26. google/adk/evaluation/vertex_ai_eval_facade.py +7 -1
  27. google/adk/events/event.py +7 -2
  28. google/adk/flows/llm_flows/base_llm_flow.py +25 -6
  29. google/adk/flows/llm_flows/functions.py +13 -19
  30. google/adk/memory/in_memory_memory_service.py +1 -1
  31. google/adk/memory/vertex_ai_memory_bank_service.py +12 -10
  32. google/adk/models/anthropic_llm.py +2 -1
  33. google/adk/models/base_llm_connection.py +2 -0
  34. google/adk/models/gemini_llm_connection.py +17 -6
  35. google/adk/models/google_llm.py +35 -5
  36. google/adk/models/lite_llm.py +31 -18
  37. google/adk/sessions/database_session_service.py +25 -24
  38. google/adk/sessions/vertex_ai_session_service.py +13 -5
  39. google/adk/tools/__init__.py +2 -0
  40. google/adk/tools/_automatic_function_calling_util.py +20 -2
  41. google/adk/tools/agent_tool.py +14 -3
  42. google/adk/tools/base_toolset.py +22 -0
  43. google/adk/tools/bigquery/metadata_tool.py +2 -0
  44. google/adk/tools/bigquery/query_tool.py +15 -1
  45. google/adk/tools/computer_use/__init__.py +13 -0
  46. google/adk/tools/computer_use/base_computer.py +265 -0
  47. google/adk/tools/computer_use/computer_use_tool.py +166 -0
  48. google/adk/tools/computer_use/computer_use_toolset.py +220 -0
  49. google/adk/tools/exit_loop_tool.py +1 -0
  50. google/adk/tools/langchain_tool.py +14 -3
  51. google/adk/tools/openapi_tool/openapi_spec_parser/openapi_spec_parser.py +5 -0
  52. google/adk/version.py +1 -1
  53. {google_adk-1.7.0.dist-info → google_adk-1.8.0.dist-info}/METADATA +2 -1
  54. {google_adk-1.7.0.dist-info → google_adk-1.8.0.dist-info}/RECORD +57 -50
  55. {google_adk-1.7.0.dist-info → google_adk-1.8.0.dist-info}/WHEEL +0 -0
  56. {google_adk-1.7.0.dist-info → google_adk-1.8.0.dist-info}/entry_points.txt +0 -0
  57. {google_adk-1.7.0.dist-info → google_adk-1.8.0.dist-info}/licenses/LICENSE +0 -0
@@ -267,36 +267,27 @@ async def handle_function_calls_live(
267
267
  # in python debugger.
268
268
  function_args = function_call.args or {}
269
269
  function_response = None
270
- # # Calls the tool if before_tool_callback does not exist or returns None.
271
- # if agent.before_tool_callback:
272
- # function_response = agent.before_tool_callback(
273
- # tool, function_args, tool_context
274
- # )
275
- if agent.before_tool_callback:
276
- function_response = agent.before_tool_callback(
270
+
271
+ # Handle before_tool_callbacks - iterate through the canonical callback
272
+ # list
273
+ for callback in agent.canonical_before_tool_callbacks:
274
+ function_response = callback(
277
275
  tool=tool, args=function_args, tool_context=tool_context
278
276
  )
279
277
  if inspect.isawaitable(function_response):
280
278
  function_response = await function_response
279
+ if function_response:
280
+ break
281
281
 
282
- if not function_response:
282
+ if function_response is None:
283
283
  function_response = await _process_function_live_helper(
284
284
  tool, tool_context, function_call, function_args, invocation_context
285
285
  )
286
286
 
287
287
  # Calls after_tool_callback if it exists.
288
- # if agent.after_tool_callback:
289
- # new_response = agent.after_tool_callback(
290
- # tool,
291
- # function_args,
292
- # tool_context,
293
- # function_response,
294
- # )
295
- # if new_response:
296
- # function_response = new_response
297
288
  altered_function_response = None
298
- if agent.after_tool_callback:
299
- altered_function_response = agent.after_tool_callback(
289
+ for callback in agent.canonical_after_tool_callbacks:
290
+ altered_function_response = callback(
300
291
  tool=tool,
301
292
  args=function_args,
302
293
  tool_context=tool_context,
@@ -304,6 +295,9 @@ async def handle_function_calls_live(
304
295
  )
305
296
  if inspect.isawaitable(altered_function_response):
306
297
  altered_function_response = await altered_function_response
298
+ if altered_function_response:
299
+ break
300
+
307
301
  if altered_function_response is not None:
308
302
  function_response = altered_function_response
309
303
 
@@ -76,7 +76,7 @@ class InMemoryMemoryService(BaseMemoryService):
76
76
  with self._lock:
77
77
  session_event_lists = self._session_events.get(user_key, {})
78
78
 
79
- words_in_query = set(query.lower().split())
79
+ words_in_query = _extract_words_lower(query)
80
80
  response = SearchMemoryResponse()
81
81
 
82
82
  for session_events in session_event_lists.values():
@@ -16,13 +16,15 @@ from __future__ import annotations
16
16
 
17
17
  import json
18
18
  import logging
19
+ from typing import Any
20
+ from typing import Dict
19
21
  from typing import Optional
20
22
  from typing import TYPE_CHECKING
21
23
 
24
+ from google.genai import Client
25
+ from google.genai import types
22
26
  from typing_extensions import override
23
27
 
24
- from google import genai
25
-
26
28
  from .base_memory_service import BaseMemoryService
27
29
  from .base_memory_service import SearchMemoryResponse
28
30
  from .memory_entry import MemoryEntry
@@ -84,7 +86,8 @@ class VertexAiMemoryBankService(BaseMemoryService):
84
86
  path=f'reasoningEngines/{self._agent_engine_id}/memories:generate',
85
87
  request_dict=request_dict,
86
88
  )
87
- logger.info(f'Generate memory response: {api_response}')
89
+ logger.info('Generate memory response received.')
90
+ logger.debug('Generate memory response: %s', api_response)
88
91
  else:
89
92
  logger.info('No events to add to memory.')
90
93
 
@@ -106,7 +109,8 @@ class VertexAiMemoryBankService(BaseMemoryService):
106
109
  },
107
110
  )
108
111
  api_response = _convert_api_response(api_response)
109
- logger.info(f'Search memory response: {api_response}')
112
+ logger.info('Search memory response received.')
113
+ logger.debug('Search memory response: %s', api_response)
110
114
 
111
115
  if not api_response or not api_response.get('retrievedMemories', None):
112
116
  return SearchMemoryResponse()
@@ -117,10 +121,8 @@ class VertexAiMemoryBankService(BaseMemoryService):
117
121
  memory_events.append(
118
122
  MemoryEntry(
119
123
  author='user',
120
- content=genai.types.Content(
121
- parts=[
122
- genai.types.Part(text=memory.get('memory').get('fact'))
123
- ],
124
+ content=types.Content(
125
+ parts=[types.Part(text=memory.get('memory').get('fact'))],
124
126
  role='user',
125
127
  ),
126
128
  timestamp=memory.get('updateTime'),
@@ -137,13 +139,13 @@ class VertexAiMemoryBankService(BaseMemoryService):
137
139
  Returns:
138
140
  An API client for the given project and location.
139
141
  """
140
- client = genai.Client(
142
+ client = Client(
141
143
  vertexai=True, project=self._project, location=self._location
142
144
  )
143
145
  return client._api_client
144
146
 
145
147
 
146
- def _convert_api_response(api_response):
148
+ def _convert_api_response(api_response) -> Dict[str, Any]:
147
149
  """Converts the API response to a JSON object based on the type."""
148
150
  if hasattr(api_response, 'body'):
149
151
  return json.loads(api_response.body)
@@ -174,7 +174,8 @@ def content_block_to_part(
174
174
  def message_to_generate_content_response(
175
175
  message: anthropic_types.Message,
176
176
  ) -> LlmResponse:
177
- logger.info(
177
+ logger.info("Received response from Claude.")
178
+ logger.debug(
178
179
  "Claude response: %s",
179
180
  message.model_dump_json(indent=2, exclude_none=True),
180
181
  )
@@ -12,6 +12,8 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
 
15
+ from __future__ import annotations
16
+
15
17
  from abc import abstractmethod
16
18
  from typing import AsyncGenerator
17
19
 
@@ -16,6 +16,7 @@ from __future__ import annotations
16
16
 
17
17
  import logging
18
18
  from typing import AsyncGenerator
19
+ from typing import Union
19
20
 
20
21
  from google.genai import live
21
22
  from google.genai import types
@@ -25,6 +26,8 @@ from .llm_response import LlmResponse
25
26
 
26
27
  logger = logging.getLogger('google_adk.' + __name__)
27
28
 
29
+ RealtimeInput = Union[types.Blob, types.ActivityStart, types.ActivityEnd]
30
+
28
31
 
29
32
  class GeminiLlmConnection(BaseLlmConnection):
30
33
  """The Gemini model connection."""
@@ -93,16 +96,24 @@ class GeminiLlmConnection(BaseLlmConnection):
93
96
  )
94
97
  )
95
98
 
96
- async def send_realtime(self, blob: types.Blob):
99
+ async def send_realtime(self, input: RealtimeInput):
97
100
  """Sends a chunk of audio or a frame of video to the model in realtime.
98
101
 
99
102
  Args:
100
- blob: The blob to send to the model.
103
+ input: The input to send to the model.
101
104
  """
102
-
103
- input_blob = blob.model_dump()
104
- logger.debug('Sending LLM Blob: %s', input_blob)
105
- await self._gemini_session.send(input=input_blob)
105
+ if isinstance(input, types.Blob):
106
+ input_blob = input.model_dump()
107
+ logger.debug('Sending LLM Blob: %s', input_blob)
108
+ await self._gemini_session.send(input=input_blob)
109
+ elif isinstance(input, types.ActivityStart):
110
+ logger.debug('Sending LLM activity start signal')
111
+ await self._gemini_session.send_realtime_input(activity_start=input)
112
+ elif isinstance(input, types.ActivityEnd):
113
+ logger.debug('Sending LLM activity end signal')
114
+ await self._gemini_session.send_realtime_input(activity_end=input)
115
+ else:
116
+ raise ValueError('Unsupported input type: %s' % type(input))
106
117
 
107
118
  def __build_full_text_response(self, text: str):
108
119
  """Builds a full text response.
@@ -68,6 +68,8 @@ class Gemini(BaseLlm):
68
68
 
69
69
  return [
70
70
  r'gemini-.*',
71
+ # model optimizer pattern
72
+ r'model-optimizer-.*',
71
73
  # fine-tuned vertex endpoint pattern
72
74
  r'projects\/.+\/locations\/.+\/endpoints\/.+',
73
75
  # vertex gemini long name
@@ -86,7 +88,7 @@ class Gemini(BaseLlm):
86
88
  Yields:
87
89
  LlmResponse: The model response.
88
90
  """
89
- self._preprocess_request(llm_request)
91
+ await self._preprocess_request(llm_request)
90
92
  self._maybe_append_user_content(llm_request)
91
93
  logger.info(
92
94
  'Sending out request, model: %s, backend: %s, stream: %s',
@@ -94,7 +96,7 @@ class Gemini(BaseLlm):
94
96
  self._api_backend,
95
97
  stream,
96
98
  )
97
- logger.info(_build_request_log(llm_request))
99
+ logger.debug(_build_request_log(llm_request))
98
100
 
99
101
  # add tracking headers to custom headers given it will override the headers
100
102
  # set in the api client constructor
@@ -119,7 +121,7 @@ class Gemini(BaseLlm):
119
121
  # previous partial content. The only difference is bidi rely on
120
122
  # complete_turn flag to detect end while sse depends on finish_reason.
121
123
  async for response in responses:
122
- logger.info(_build_response_log(response))
124
+ logger.debug(_build_response_log(response))
123
125
  llm_response = LlmResponse.create(response)
124
126
  usage_metadata = llm_response.usage_metadata
125
127
  if (
@@ -177,7 +179,8 @@ class Gemini(BaseLlm):
177
179
  contents=llm_request.contents,
178
180
  config=llm_request.config,
179
181
  )
180
- logger.info(_build_response_log(response))
182
+ logger.info('Response received from the model.')
183
+ logger.debug(_build_response_log(response))
181
184
  yield LlmResponse.create(response)
182
185
 
183
186
  @cached_property
@@ -267,7 +270,22 @@ class Gemini(BaseLlm):
267
270
  ) as live_session:
268
271
  yield GeminiLlmConnection(live_session)
269
272
 
270
- def _preprocess_request(self, llm_request: LlmRequest) -> None:
273
+ async def _adapt_computer_use_tool(self, llm_request: LlmRequest) -> None:
274
+ """Adapt the google computer use predefined functions to the adk computer use toolset."""
275
+
276
+ from ..tools.computer_use.computer_use_toolset import ComputerUseToolset
277
+
278
+ async def convert_wait_to_wait_5_seconds(wait_func):
279
+ async def wait_5_seconds():
280
+ return await wait_func(5)
281
+
282
+ return wait_5_seconds
283
+
284
+ await ComputerUseToolset.adapt_computer_use_tool(
285
+ 'wait', convert_wait_to_wait_5_seconds, llm_request
286
+ )
287
+
288
+ async def _preprocess_request(self, llm_request: LlmRequest) -> None:
271
289
 
272
290
  if self._api_backend == GoogleLLMVariant.GEMINI_API:
273
291
  # Using API key from Google AI Studio to call model doesn't support labels.
@@ -282,6 +300,18 @@ class Gemini(BaseLlm):
282
300
  _remove_display_name_if_present(part.inline_data)
283
301
  _remove_display_name_if_present(part.file_data)
284
302
 
303
+ # Initialize config if needed
304
+ if llm_request.config and llm_request.config.tools:
305
+ # Check if computer use is configured
306
+ for tool in llm_request.config.tools:
307
+ if (
308
+ isinstance(tool, (types.Tool, types.ToolDict))
309
+ and hasattr(tool, 'computer_use')
310
+ and tool.computer_use
311
+ ):
312
+ llm_request.config.system_instruction = None
313
+ await self._adapt_computer_use_tool(llm_request)
314
+
285
315
 
286
316
  def _build_function_declaration_log(
287
317
  func_decl: types.FunctionDeclaration,
@@ -311,7 +311,9 @@ TYPE_LABELS = {
311
311
 
312
312
 
313
313
  def _schema_to_dict(schema: types.Schema) -> dict:
314
- """Recursively converts a types.Schema to a dictionary.
314
+ """
315
+ Recursively converts a types.Schema to a pure-python dict
316
+ with all enum values written as lower-case strings.
315
317
 
316
318
  Args:
317
319
  schema: The schema to convert.
@@ -319,29 +321,40 @@ def _schema_to_dict(schema: types.Schema) -> dict:
319
321
  Returns:
320
322
  The dictionary representation of the schema.
321
323
  """
322
-
324
+ # Dump without json encoding so we still get Enum members
323
325
  schema_dict = schema.model_dump(exclude_none=True)
326
+
327
+ # ---- normalise this level ------------------------------------------------
324
328
  if "type" in schema_dict:
325
- schema_dict["type"] = schema_dict["type"].lower()
329
+ # schema_dict["type"] can be an Enum or a str
330
+ t = schema_dict["type"]
331
+ schema_dict["type"] = (t.value if isinstance(t, types.Type) else t).lower()
332
+
333
+ # ---- recurse into `items` -----------------------------------------------
326
334
  if "items" in schema_dict:
327
- if isinstance(schema_dict["items"], dict):
328
- schema_dict["items"] = _schema_to_dict(
329
- types.Schema.model_validate(schema_dict["items"])
330
- )
331
- elif isinstance(schema_dict["items"]["type"], types.Type):
332
- schema_dict["items"]["type"] = TYPE_LABELS[
333
- schema_dict["items"]["type"].value
334
- ]
335
+ schema_dict["items"] = _schema_to_dict(
336
+ schema.items
337
+ if isinstance(schema.items, types.Schema)
338
+ else types.Schema.model_validate(schema_dict["items"])
339
+ )
340
+
341
+ # ---- recurse into `properties` ------------------------------------------
335
342
  if "properties" in schema_dict:
336
- properties = {}
343
+ new_props = {}
337
344
  for key, value in schema_dict["properties"].items():
338
- if isinstance(value, types.Schema):
339
- properties[key] = _schema_to_dict(value)
345
+ # value is a dict → rebuild a Schema object and recurse
346
+ if isinstance(value, dict):
347
+ new_props[key] = _schema_to_dict(types.Schema.model_validate(value))
348
+ # value is already a Schema instance
349
+ elif isinstance(value, types.Schema):
350
+ new_props[key] = _schema_to_dict(value)
351
+ # plain dict without nested schemas
340
352
  else:
341
- properties[key] = value
342
- if "type" in properties[key]:
343
- properties[key]["type"] = properties[key]["type"].lower()
344
- schema_dict["properties"] = properties
353
+ new_props[key] = value
354
+ if "type" in new_props[key]:
355
+ new_props[key]["type"] = new_props[key]["type"].lower()
356
+ schema_dict["properties"] = new_props
357
+
345
358
  return schema_dict
346
359
 
347
360
 
@@ -160,6 +160,26 @@ class StorageSession(Base):
160
160
  return self.update_time.replace(tzinfo=timezone.utc).timestamp()
161
161
  return self.update_time.timestamp()
162
162
 
163
+ def to_session(
164
+ self,
165
+ state: dict[str, Any] | None = None,
166
+ events: list[Event] | None = None,
167
+ ) -> Session:
168
+ """Converts the storage session to a session object."""
169
+ if state is None:
170
+ state = {}
171
+ if events is None:
172
+ events = []
173
+
174
+ return Session(
175
+ app_name=self.app_name,
176
+ user_id=self.user_id,
177
+ id=self.id,
178
+ state=state,
179
+ events=events,
180
+ last_update_time=self.update_timestamp_tz,
181
+ )
182
+
163
183
 
164
184
  class StorageEvent(Base):
165
185
  """Represents an event stored in the database."""
@@ -423,14 +443,8 @@ class DatabaseSessionService(BaseSessionService):
423
443
 
424
444
  # Merge states for response
425
445
  merged_state = _merge_state(app_state, user_state, session_state)
426
- session = Session(
427
- app_name=str(storage_session.app_name),
428
- user_id=str(storage_session.user_id),
429
- id=str(storage_session.id),
430
- state=merged_state,
431
- last_update_time=storage_session.update_timestamp_tz,
432
- )
433
- return session
446
+ session = storage_session.to_session(state=merged_state)
447
+ return session
434
448
 
435
449
  @override
436
450
  async def get_session(
@@ -486,14 +500,8 @@ class DatabaseSessionService(BaseSessionService):
486
500
  merged_state = _merge_state(app_state, user_state, session_state)
487
501
 
488
502
  # Convert storage session to session
489
- session = Session(
490
- app_name=app_name,
491
- user_id=user_id,
492
- id=session_id,
493
- state=merged_state,
494
- last_update_time=storage_session.update_timestamp_tz,
495
- )
496
- session.events = [e.to_event() for e in reversed(storage_events)]
503
+ events = [e.to_event() for e in reversed(storage_events)]
504
+ session = storage_session.to_session(state=merged_state, events=events)
497
505
  return session
498
506
 
499
507
  @override
@@ -509,14 +517,7 @@ class DatabaseSessionService(BaseSessionService):
509
517
  )
510
518
  sessions = []
511
519
  for storage_session in results:
512
- session = Session(
513
- app_name=app_name,
514
- user_id=user_id,
515
- id=storage_session.id,
516
- state={},
517
- last_update_time=storage_session.update_timestamp_tz,
518
- )
519
- sessions.append(session)
520
+ sessions.append(storage_session.to_session())
520
521
  return ListSessionsResponse(sessions=sessions)
521
522
 
522
523
  @override
@@ -13,7 +13,6 @@
13
13
  # limitations under the License.
14
14
  from __future__ import annotations
15
15
 
16
- import asyncio
17
16
  import json
18
17
  import logging
19
18
  import os
@@ -110,7 +109,8 @@ class VertexAiSessionService(BaseSessionService):
110
109
  request_dict=session_json_dict,
111
110
  )
112
111
  api_response = _convert_api_response(api_response)
113
- logger.info(f'Create Session response {api_response}')
112
+ logger.info('Create session response received.')
113
+ logger.debug('Create session response: %s', api_response)
114
114
 
115
115
  session_id = api_response['name'].split('/')[-3]
116
116
  operation_id = api_response['name'].split('/')[-1]
@@ -351,16 +351,24 @@ class VertexAiSessionService(BaseSessionService):
351
351
 
352
352
  return match.groups()[-1]
353
353
 
354
+ def _api_client_http_options_override(
355
+ self,
356
+ ) -> Optional[genai.types.HttpOptions]:
357
+ return None
358
+
354
359
  def _get_api_client(self):
355
360
  """Instantiates an API client for the given project and location.
356
361
 
357
362
  It needs to be instantiated inside each request so that the event loop
358
363
  management can be properly propagated.
359
364
  """
360
- client = genai.Client(
365
+ api_client = genai.Client(
361
366
  vertexai=True, project=self._project, location=self._location
362
- )
363
- return client._api_client
367
+ )._api_client
368
+
369
+ if new_options := self._api_client_http_options_override():
370
+ api_client._http_options = new_options
371
+ return api_client
364
372
 
365
373
 
366
374
  def _is_vertex_express_mode(
@@ -14,6 +14,7 @@
14
14
 
15
15
 
16
16
  from ..auth.auth_tool import AuthToolArguments
17
+ from .agent_tool import AgentTool
17
18
  from .apihub_tool.apihub_toolset import APIHubToolset
18
19
  from .base_tool import BaseTool
19
20
  from .example_tool import ExampleTool
@@ -31,6 +32,7 @@ from .url_context_tool import url_context
31
32
  from .vertex_ai_search_tool import VertexAiSearchTool
32
33
 
33
34
  __all__ = [
35
+ 'AgentTool',
34
36
  'APIHubToolset',
35
37
  'AuthToolArguments',
36
38
  'BaseTool',
@@ -20,7 +20,6 @@ import typing
20
20
  from typing import Any
21
21
  from typing import Callable
22
22
  from typing import Dict
23
- from typing import Literal
24
23
  from typing import Optional
25
24
  from typing import Union
26
25
 
@@ -329,7 +328,26 @@ def from_function_with_options(
329
328
  return declaration
330
329
 
331
330
  return_annotation = inspect.signature(func).return_annotation
332
- if return_annotation is inspect._empty:
331
+
332
+ # Handle functions with no return annotation or that return None
333
+ if (
334
+ return_annotation is inspect._empty
335
+ or return_annotation is None
336
+ or return_annotation is type(None)
337
+ ):
338
+ # Create a response schema for None/null return
339
+ return_value = inspect.Parameter(
340
+ 'return_value',
341
+ inspect.Parameter.POSITIONAL_OR_KEYWORD,
342
+ annotation=None,
343
+ )
344
+ declaration.response = (
345
+ _function_parameter_parse_util._parse_schema_from_parameter(
346
+ variant,
347
+ return_value,
348
+ func.__name__,
349
+ )
350
+ )
333
351
  return declaration
334
352
 
335
353
  return_value = inspect.Parameter(
@@ -23,15 +23,12 @@ from typing_extensions import override
23
23
 
24
24
  from . import _automatic_function_calling_util
25
25
  from ..memory.in_memory_memory_service import InMemoryMemoryService
26
- from ..runners import Runner
27
- from ..sessions.in_memory_session_service import InMemorySessionService
28
26
  from ._forwarding_artifact_service import ForwardingArtifactService
29
27
  from .base_tool import BaseTool
30
28
  from .tool_context import ToolContext
31
29
 
32
30
  if TYPE_CHECKING:
33
31
  from ..agents.base_agent import BaseAgent
34
- from ..agents.llm_agent import LlmAgent
35
32
 
36
33
 
37
34
  class AgentTool(BaseTool):
@@ -61,6 +58,7 @@ class AgentTool(BaseTool):
61
58
  @override
62
59
  def _get_declaration(self) -> types.FunctionDeclaration:
63
60
  from ..agents.llm_agent import LlmAgent
61
+ from ..utils.variant_utils import GoogleLLMVariant
64
62
 
65
63
  if isinstance(self.agent, LlmAgent) and self.agent.input_schema:
66
64
  result = _automatic_function_calling_util.build_function_declaration(
@@ -80,6 +78,17 @@ class AgentTool(BaseTool):
80
78
  description=self.agent.description,
81
79
  name=self.name,
82
80
  )
81
+
82
+ # Set response schema for non-GEMINI_API variants
83
+ if self._api_variant != GoogleLLMVariant.GEMINI_API:
84
+ # Determine response type based on agent's output schema
85
+ if isinstance(self.agent, LlmAgent) and self.agent.output_schema:
86
+ # Agent has structured output schema - response is an object
87
+ result.response = types.Schema(type=types.Type.OBJECT)
88
+ else:
89
+ # Agent returns text - response is a string
90
+ result.response = types.Schema(type=types.Type.STRING)
91
+
83
92
  result.name = self.name
84
93
  return result
85
94
 
@@ -91,6 +100,8 @@ class AgentTool(BaseTool):
91
100
  tool_context: ToolContext,
92
101
  ) -> Any:
93
102
  from ..agents.llm_agent import LlmAgent
103
+ from ..runners import Runner
104
+ from ..sessions.in_memory_session_service import InMemorySessionService
94
105
 
95
106
  if self.skip_summarization:
96
107
  tool_context.actions.skip_summarization = True
@@ -20,11 +20,16 @@ from typing import List
20
20
  from typing import Optional
21
21
  from typing import Protocol
22
22
  from typing import runtime_checkable
23
+ from typing import TYPE_CHECKING
23
24
  from typing import Union
24
25
 
25
26
  from ..agents.readonly_context import ReadonlyContext
26
27
  from .base_tool import BaseTool
27
28
 
29
+ if TYPE_CHECKING:
30
+ from ..models.llm_request import LlmRequest
31
+ from .tool_context import ToolContext
32
+
28
33
 
29
34
  @runtime_checkable
30
35
  class ToolPredicate(Protocol):
@@ -96,3 +101,20 @@ class BaseToolset(ABC):
96
101
  return tool.name in self.tool_filter
97
102
 
98
103
  return False
104
+
105
+ async def process_llm_request(
106
+ self, *, tool_context: ToolContext, llm_request: LlmRequest
107
+ ) -> None:
108
+ """Processes the outgoing LLM request for this toolset. This method will be
109
+ called before each tool processes the llm request.
110
+
111
+ Use cases:
112
+ - Instead of let each tool process the llm request, we can let the toolset
113
+ process the llm request. e.g. ComputerUseToolset can add computer use
114
+ tool to the llm request.
115
+
116
+ Args:
117
+ tool_context: The context of the tool.
118
+ llm_request: The outgoing LLM request, mutable this method.
119
+ """
120
+ pass
@@ -12,6 +12,8 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
 
15
+ from __future__ import annotations
16
+
15
17
  from google.auth.credentials import Credentials
16
18
  from google.cloud import bigquery
17
19
 
@@ -12,7 +12,10 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
 
15
+ from __future__ import annotations
16
+
15
17
  import functools
18
+ import json
16
19
  import types
17
20
  from typing import Callable
18
21
 
@@ -159,7 +162,18 @@ def execute_sql(
159
162
  project=project_id,
160
163
  max_results=MAX_DOWNLOADED_QUERY_RESULT_ROWS,
161
164
  )
162
- rows = [{key: val for key, val in row.items()} for row in row_iterator]
165
+ rows = []
166
+ for row in row_iterator:
167
+ row_values = {}
168
+ for key, val in row.items():
169
+ try:
170
+ # if the json serialization of the value succeeds, use it as is
171
+ json.dumps(val)
172
+ except:
173
+ val = str(val)
174
+ row_values[key] = val
175
+ rows.append(row_values)
176
+
163
177
  result = {"status": "SUCCESS", "rows": rows}
164
178
  if (
165
179
  MAX_DOWNLOADED_QUERY_RESULT_ROWS is not None
@@ -0,0 +1,13 @@
1
+ # Copyright 2025 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.