google-adk 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. google/adk/agents/callback_context.py +0 -1
  2. google/adk/auth/auth_credential.py +2 -1
  3. google/adk/auth/auth_handler.py +7 -3
  4. google/adk/cli/browser/index.html +1 -1
  5. google/adk/cli/browser/{main-ZBO76GRM.js → main-HWIBUY2R.js} +69 -53
  6. google/adk/cli/cli_deploy.py +6 -1
  7. google/adk/cli/cli_eval.py +1 -1
  8. google/adk/cli/cli_tools_click.py +28 -12
  9. google/adk/cli/fast_api.py +6 -0
  10. google/adk/evaluation/agent_evaluator.py +2 -2
  11. google/adk/evaluation/response_evaluator.py +2 -2
  12. google/adk/evaluation/trajectory_evaluator.py +4 -5
  13. google/adk/flows/llm_flows/agent_transfer.py +1 -1
  14. google/adk/flows/llm_flows/base_llm_flow.py +1 -1
  15. google/adk/flows/llm_flows/contents.py +1 -1
  16. google/adk/flows/llm_flows/functions.py +1 -3
  17. google/adk/flows/llm_flows/instructions.py +2 -2
  18. google/adk/models/gemini_llm_connection.py +2 -2
  19. google/adk/models/llm_response.py +10 -1
  20. google/adk/planners/built_in_planner.py +1 -0
  21. google/adk/sessions/database_session_service.py +10 -6
  22. google/adk/sessions/state.py +1 -1
  23. google/adk/tools/agent_tool.py +2 -3
  24. google/adk/tools/google_api_tool/google_api_tool_set.py +12 -9
  25. google/adk/tools/load_artifacts_tool.py +1 -1
  26. google/adk/tools/openapi_tool/auth/credential_exchangers/oauth2_exchanger.py +4 -4
  27. google/adk/tools/openapi_tool/openapi_spec_parser/openapi_toolset.py +1 -1
  28. google/adk/tools/openapi_tool/openapi_spec_parser/operation_parser.py +5 -12
  29. google/adk/tools/openapi_tool/openapi_spec_parser/rest_api_tool.py +46 -8
  30. google/adk/version.py +1 -1
  31. {google_adk-0.2.0.dist-info → google_adk-0.3.0.dist-info}/METADATA +27 -4
  32. {google_adk-0.2.0.dist-info → google_adk-0.3.0.dist-info}/RECORD +35 -35
  33. {google_adk-0.2.0.dist-info → google_adk-0.3.0.dist-info}/WHEEL +0 -0
  34. {google_adk-0.2.0.dist-info → google_adk-0.3.0.dist-info}/entry_points.txt +0 -0
  35. {google_adk-0.2.0.dist-info → google_adk-0.3.0.dist-info}/licenses/LICENSE +0 -0
google/adk/cli/cli_deploy.py CHANGED
@@ -54,7 +54,7 @@ COPY "agents/{app_name}/" "/app/agents/{app_name}/"
 
  EXPOSE {port}
 
- CMD adk {command} --port={port} {trace_to_cloud_option} "/app/agents"
+ CMD adk {command} --port={port} {session_db_option} {trace_to_cloud_option} "/app/agents"
  """
 
 
@@ -85,6 +85,7 @@ def to_cloud_run(
  trace_to_cloud: bool,
  with_ui: bool,
  verbosity: str,
+ session_db_url: str,
  ):
  """Deploys an agent to Google Cloud Run.
 
@@ -112,6 +113,7 @@ def to_cloud_run(
  trace_to_cloud: Whether to enable Cloud Trace.
  with_ui: Whether to deploy with UI.
  verbosity: The verbosity level of the CLI.
+ session_db_url: The database URL to connect the session.
  """
  app_name = app_name or os.path.basename(agent_folder)
 
@@ -144,6 +146,9 @@ def to_cloud_run(
  port=port,
  command='web' if with_ui else 'api_server',
  install_agent_deps=install_agent_deps,
+ session_db_option=f'--session_db_url={session_db_url}'
+ if session_db_url
+ else '',
  trace_to_cloud_option='--trace_to_cloud' if trace_to_cloud else '',
  )
  dockerfile_path = os.path.join(temp_folder, 'Dockerfile')
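
Note: the new {session_db_option} placeholder renders to an empty string when no URL is supplied, so existing deployments keep their current CMD. A minimal sketch of the rendering (the port, command, and URL are hypothetical; only the conditional mirrors the diff):

    session_db_url = "sqlite:///./sessions.db"
    session_db_option = f"--session_db_url={session_db_url}" if session_db_url else ""
    print(f'CMD adk web --port=8000 {session_db_option} "/app/agents"')
    # -> CMD adk web --port=8000 --session_db_url=sqlite:///./sessions.db "/app/agents"
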
google/adk/cli/cli_eval.py CHANGED
@@ -256,7 +256,7 @@ def run_evals(
  )
 
  if final_eval_status == EvalStatus.PASSED:
- result = "✅ Passsed"
+ result = "✅ Passed"
  else:
  result = "❌ Failed"
 
google/adk/cli/cli_tools_click.py CHANGED
@@ -245,12 +245,13 @@ def cli_eval(
  @click.option(
  "--session_db_url",
  help=(
- "Optional. The database URL to store the session.\n\n - Use"
- " 'agentengine://<agent_engine_resource_id>' to connect to Vertex"
- " managed session service.\n\n - Use 'sqlite://<path_to_sqlite_file>'"
- " to connect to a SQLite DB.\n\n - See"
- " https://docs.sqlalchemy.org/en/20/core/engines.html#backend-specific-urls"
- " for more details on supported DB URLs."
+ """Optional. The database URL to store the session.
+
+ - Use 'agentengine://<agent_engine_resource_id>' to connect to Agent Engine sessions.
+
+ - Use 'sqlite://<path_to_sqlite_file>' to connect to a SQLite DB.
+
+ - See https://docs.sqlalchemy.org/en/20/core/engines.html#backend-specific-urls for more details on supported DB URLs."""
  ),
  )
  @click.option(
@@ -366,12 +367,13 @@ def cli_web(
  @click.option(
  "--session_db_url",
  help=(
- "Optional. The database URL to store the session.\n\n - Use"
- " 'agentengine://<agent_engine_resource_id>' to connect to Vertex"
- " managed session service.\n\n - Use 'sqlite://<path_to_sqlite_file>'"
- " to connect to a SQLite DB.\n\n - See"
- " https://docs.sqlalchemy.org/en/20/core/engines.html#backend-specific-urls"
- " for more details on supported DB URLs."
+ """Optional. The database URL to store the session.
+
+ - Use 'agentengine://<agent_engine_resource_id>' to connect to Agent Engine sessions.
+
+ - Use 'sqlite://<path_to_sqlite_file>' to connect to a SQLite DB.
+
+ - See https://docs.sqlalchemy.org/en/20/core/engines.html#backend-specific-urls for more details on supported DB URLs."""
  ),
  )
  @click.option(
@@ -541,6 +543,18 @@ def cli_api_server(
  default="WARNING",
  help="Optional. Override the default verbosity level.",
  )
+ @click.option(
+ "--session_db_url",
+ help=(
+ """Optional. The database URL to store the session.
+
+ - Use 'agentengine://<agent_engine_resource_id>' to connect to Agent Engine sessions.
+
+ - Use 'sqlite://<path_to_sqlite_file>' to connect to a SQLite DB.
+
+ - See https://docs.sqlalchemy.org/en/20/core/engines.html#backend-specific-urls for more details on supported DB URLs."""
+ ),
+ )
  @click.argument(
  "agent",
  type=click.Path(
@@ -558,6 +572,7 @@ def cli_deploy_cloud_run(
  trace_to_cloud: bool,
  with_ui: bool,
  verbosity: str,
+ session_db_url: str,
  ):
  """Deploys an agent to Cloud Run.
 
@@ -579,6 +594,7 @@ def cli_deploy_cloud_run(
  trace_to_cloud=trace_to_cloud,
  with_ui=with_ui,
  verbosity=verbosity,
+ session_db_url=session_db_url,
  )
  except Exception as e:
  click.secho(f"Deploy failed: {e}", fg="red", err=True)
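
Note: the same help text now appears on the serving commands, and api_server gains the option in this release. Hypothetical invocations (the agents path and URLs are placeholders; the sqlite form follows SQLAlchemy URL rules):

    adk web --session_db_url "sqlite:///./sessions.db" ./agents
    adk api_server --session_db_url "agentengine://<agent_engine_resource_id>" ./agents
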
google/adk/cli/fast_api.py CHANGED
@@ -756,6 +756,12 @@ def get_fast_api_app(
  except Exception as e:
  logger.exception("Error during live websocket communication: %s", e)
  traceback.print_exc()
+ WEBSOCKET_INTERNAL_ERROR_CODE = 1011
+ WEBSOCKET_MAX_BYTES_FOR_REASON = 123
+ await websocket.close(
+ code=WEBSOCKET_INTERNAL_ERROR_CODE,
+ reason=str(e)[:WEBSOCKET_MAX_BYTES_FOR_REASON],
+ )
  finally:
  for task in pending:
  task.cancel()
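
Note: close code 1011 is "internal error" in RFC 6455, and a close frame's reason payload is capped at 123 bytes, which is why the message is truncated. A minimal sketch of the truncation, assuming an ASCII error message:

    # RFC 6455 caps the close reason at 123 bytes; for ASCII text,
    # slicing by characters is equivalent to slicing by bytes.
    e = RuntimeError("live connection failed: " + "x" * 200)
    reason = str(e)[:123]
    assert len(reason.encode("utf-8")) <= 123
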
google/adk/evaluation/agent_evaluator.py CHANGED
@@ -55,7 +55,7 @@ def load_json(file_path: str) -> Union[Dict, List]:
 
 
  class AgentEvaluator:
- """An evaluator for Agents, mainly intented for helping with test cases."""
+ """An evaluator for Agents, mainly intended for helping with test cases."""
 
  @staticmethod
  def find_config_for_test_file(test_file: str):
@@ -91,7 +91,7 @@ class AgentEvaluator:
  look for 'root_agent' in the loaded module.
  eval_dataset: The eval data set. This can be either a string representing
  full path to the file containing eval dataset, or a directory that is
- recusively explored for all files that have a `.test.json` suffix.
+ recursively explored for all files that have a `.test.json` suffix.
  num_runs: Number of times all entries in the eval dataset should be
  assessed.
  agent_name: The name of the agent.
google/adk/evaluation/response_evaluator.py CHANGED
@@ -35,7 +35,7 @@ class ResponseEvaluator:
  Args:
  raw_eval_dataset: The dataset that will be evaluated.
  evaluation_criteria: The evaluation criteria to be used. This method
- support two criterias, `response_evaluation_score` and
+ support two criteria, `response_evaluation_score` and
  `response_match_score`.
  print_detailed_results: Prints detailed results on the console. This is
  usually helpful during debugging.
@@ -56,7 +56,7 @@ class ResponseEvaluator:
  Value range: [0, 5], where 0 means that the agent's response is not
  coherent, while 5 means it is . High values are good.
  A note on raw_eval_dataset:
- The dataset should be a list session, where each sesssion is represented
+ The dataset should be a list session, where each session is represented
  as a list of interaction that need evaluation. Each evaluation is
  represented as a dictionary that is expected to have values for the
  following keys:
google/adk/evaluation/trajectory_evaluator.py CHANGED
@@ -31,10 +31,9 @@ class TrajectoryEvaluator:
  ):
  r"""Returns the mean tool use accuracy of the eval dataset.
 
- Tool use accuracy is calculated by comparing the expected and actuall tool
- use trajectories. An exact match scores a 1, 0 otherwise. The final number
- is an
- average of these individual scores.
+ Tool use accuracy is calculated by comparing the expected and the actual
+ tool use trajectories. An exact match scores a 1, 0 otherwise. The final
+ number is an average of these individual scores.
 
  Value range: [0, 1], where 0 is means none of the too use entries aligned,
  and 1 would mean all of them aligned. Higher value is good.
@@ -45,7 +44,7 @@ class TrajectoryEvaluator:
  usually helpful during debugging.
 
  A note on eval_dataset:
- The dataset should be a list session, where each sesssion is represented
+ The dataset should be a list session, where each session is represented
  as a list of interaction that need evaluation. Each evaluation is
  represented as a dictionary that is expected to have values for the
  following keys:
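
Note: a worked example of the metric described in the docstring above, with per-entry exact matches averaged into one score:

    # Three evaluated tool-use entries: two exact trajectory matches, one mismatch.
    scores = [1, 0, 1]
    accuracy = sum(scores) / len(scores)  # 0.666..., within the stated [0, 1] range
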
google/adk/flows/llm_flows/agent_transfer.py CHANGED
@@ -94,7 +94,7 @@ can answer it.
 
  If another agent is better for answering the question according to its
  description, call `{_TRANSFER_TO_AGENT_FUNCTION_NAME}` function to transfer the
- question to that agent. When transfering, do not generate any text other than
+ question to that agent. When transferring, do not generate any text other than
  the function call.
  """
 
google/adk/flows/llm_flows/base_llm_flow.py CHANGED
@@ -115,7 +115,7 @@ class BaseLlmFlow(ABC):
  yield event
  # send back the function response
  if event.get_function_responses():
- logger.debug('Sending back last function resonse event: %s', event)
+ logger.debug('Sending back last function response event: %s', event)
  invocation_context.live_request_queue.send_content(event.content)
  if (
  event.content
google/adk/flows/llm_flows/contents.py CHANGED
@@ -111,7 +111,7 @@ def _rearrange_events_for_latest_function_response(
  """Rearrange the events for the latest function_response.
 
  If the latest function_response is for an async function_call, all events
- bewteen the initial function_call and the latest function_response will be
+ between the initial function_call and the latest function_response will be
  removed.
 
  Args:
google/adk/flows/llm_flows/functions.py CHANGED
@@ -310,9 +310,7 @@ async def _process_function_live_helper(
  function_response = {
  'status': f'No active streaming function named {function_name} found'
  }
- elif inspect.isasyncgenfunction(tool.func):
- print('is async')
-
+ elif hasattr(tool, "func") and inspect.isasyncgenfunction(tool.func):
  # for streaming tool use case
  # we require the function to be a async generator function
  async def run_tool_and_update_queue(tool, function_args, tool_context):
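
Note: the added hasattr guard matters because not every tool wraps a plain Python function; `and` short-circuits, so tools without a `func` attribute no longer raise AttributeError (and the stray debug print is gone). A minimal sketch with a hypothetical tool class:

    import inspect

    class BuiltInTool:  # hypothetical tool type with no `func` attribute
        pass

    tool = BuiltInTool()
    # Short-circuits to False instead of raising AttributeError:
    is_streaming = hasattr(tool, "func") and inspect.isasyncgenfunction(tool.func)
    assert is_streaming is False
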
google/adk/flows/llm_flows/instructions.py CHANGED
@@ -52,7 +52,7 @@ class _InstructionsLlmRequestProcessor(BaseLlmRequestProcessor):
  # Appends global instructions if set.
  if (
  isinstance(root_agent, LlmAgent) and root_agent.global_instruction
- ): # not emtpy str
+ ): # not empty str
  raw_si = root_agent.canonical_global_instruction(
  ReadonlyContext(invocation_context)
  )
@@ -60,7 +60,7 @@ class _InstructionsLlmRequestProcessor(BaseLlmRequestProcessor):
  llm_request.append_instructions([si])
 
  # Appends agent instructions if set.
- if agent.instruction: # not emtpy str
+ if agent.instruction: # not empty str
  raw_si = agent.canonical_instruction(ReadonlyContext(invocation_context))
  si = _populate_values(raw_si, invocation_context)
  llm_request.append_instructions([si])
google/adk/models/gemini_llm_connection.py CHANGED
@@ -152,7 +152,7 @@ class GeminiLlmConnection(BaseLlmConnection):
  ):
  # TODO: Right now, we just support output_transcription without
  # changing interface and data protocol. Later, we can consider to
- # support output_transcription as a separete field in LlmResponse.
+ # support output_transcription as a separate field in LlmResponse.
 
  # Transcription is always considered as partial event
  # We rely on other control signals to determine when to yield the
@@ -179,7 +179,7 @@ class GeminiLlmConnection(BaseLlmConnection):
  # in case of empty content or parts, we sill surface it
  # in case it's an interrupted message, we merge the previous partial
  # text. Other we don't merge. because content can be none when model
- # safty threshold is triggered
+ # safety threshold is triggered
  if message.server_content.interrupted and text:
  yield self.__build_full_text_response(text)
  text = ''
google/adk/models/llm_response.py CHANGED
@@ -14,7 +14,7 @@
 
  from __future__ import annotations
 
- from typing import Optional
+ from typing import Any, Optional
 
  from google.genai import types
  from pydantic import BaseModel
@@ -37,6 +37,7 @@ class LlmResponse(BaseModel):
  error_message: Error message if the response is an error.
  interrupted: Flag indicating that LLM was interrupted when generating the
  content. Usually it's due to user interruption during a bidi streaming.
+ custom_metadata: The custom metadata of the LlmResponse.
  """
 
  model_config = ConfigDict(extra='forbid')
@@ -71,6 +72,14 @@ class LlmResponse(BaseModel):
  Usually it's due to user interruption during a bidi streaming.
  """
 
+ custom_metadata: Optional[dict[str, Any]] = None
+ """The custom metadata of the LlmResponse.
+
+ An optional key-value pair to label an LlmResponse.
+
+ NOTE: the entire dict must be JSON serializable.
+ """
+
  @staticmethod
  def create(
  generate_content_response: types.GenerateContentResponse,
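
Note: a minimal sketch of labeling a response via the new field (the metadata keys are hypothetical; the whole dict must remain JSON-serializable):

    from google.adk.models.llm_response import LlmResponse

    resp = LlmResponse(custom_metadata={"experiment": "variant-b", "attempt": 1})

Because the model is configured with extra='forbid', arbitrary labels belong inside custom_metadata rather than as ad-hoc top-level fields.
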
google/adk/planners/built_in_planner.py CHANGED
@@ -56,6 +56,7 @@ class BuiltInPlanner(BasePlanner):
  llm_request: The LLM request to apply the thinking config to.
  """
  if self.thinking_config:
+ llm_request.config = llm_request.config or types.GenerateContentConfig()
  llm_request.config.thinking_config = self.thinking_config
 
  @override
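
Note: the inserted line guards against llm_request.config being unset before thinking_config is assigned. In essence (a sketch; the ThinkingConfig value is a hypothetical example):

    from google.genai import types

    config = None  # llm_request.config may not have been created yet
    config = config or types.GenerateContentConfig()  # the added guard
    config.thinking_config = types.ThinkingConfig(include_thoughts=True)
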
google/adk/sessions/database_session_service.py CHANGED
@@ -17,10 +17,10 @@ import copy
  from datetime import datetime
  import json
  import logging
- from typing import Any
- from typing import Optional
+ from typing import Any, Optional
  import uuid
 
+ from google.genai import types
  from sqlalchemy import Boolean
  from sqlalchemy import delete
  from sqlalchemy import Dialect
@@ -136,7 +136,7 @@ class StorageEvent(Base):
  author: Mapped[str] = mapped_column(String)
  branch: Mapped[str] = mapped_column(String, nullable=True)
  timestamp: Mapped[DateTime] = mapped_column(DateTime(), default=func.now())
- content: Mapped[dict[str, Any]] = mapped_column(DynamicJSON)
+ content: Mapped[dict[str, Any]] = mapped_column(DynamicJSON, nullable=True)
  actions: Mapped[MutableDict[str, Any]] = mapped_column(PickleType)
 
  long_running_tool_ids_json: Mapped[Optional[str]] = mapped_column(
@@ -217,7 +217,7 @@ class DatabaseSessionService(BaseSessionService):
  """
  # 1. Create DB engine for db connection
  # 2. Create all tables based on schema
- # 3. Initialize all properies
+ # 3. Initialize all properties
 
  try:
  db_engine = create_engine(db_url)
@@ -576,8 +576,12 @@ def _merge_state(app_state, user_state, session_state):
  return merged_state
 
 
- def _decode_content(content: dict[str, Any]) -> dict[str, Any]:
+ def _decode_content(
+ content: Optional[dict[str, Any]],
+ ) -> Optional[types.Content]:
+ if not content:
+ return None
  for p in content["parts"]:
  if "inline_data" in p:
  p["inline_data"]["data"] = base64.b64decode(p["inline_data"]["data"][0])
- return content
+ return types.Content.model_validate(content)
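
Note: with the content column now nullable, _decode_content accepts missing values and returns a typed types.Content instead of a raw dict. A sketch of the new behavior (_decode_content is module-private; the event dict is hypothetical):

    from google.adk.sessions.database_session_service import _decode_content
    from google.genai import types

    assert _decode_content(None) is None  # events stored without content now round-trip
    decoded = _decode_content({"role": "model", "parts": [{"text": "hi"}]})
    assert isinstance(decoded, types.Content)
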
google/adk/sessions/state.py CHANGED
@@ -26,7 +26,7 @@ class State:
  """
  Args:
  value: The current value of the state dict.
- delta: The delta change to the current value that hasn't been commited.
+ delta: The delta change to the current value that hasn't been committed.
  """
  self._value = value
  self._delta = delta
google/adk/tools/agent_tool.py CHANGED
@@ -45,10 +45,9 @@ class AgentTool(BaseTool):
  skip_summarization: Whether to skip summarization of the agent output.
  """
 
- def __init__(self, agent: BaseAgent):
+ def __init__(self, agent: BaseAgent, skip_summarization: bool = False):
  self.agent = agent
- self.skip_summarization: bool = False
- """Whether to skip summarization of the agent output."""
+ self.skip_summarization: bool = skip_summarization
 
  super().__init__(name=agent.name, description=agent.description)
 
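Note: skip_summarization is now a constructor argument rather than an attribute set after init. Hypothetical usage, assuming my_agent is an existing BaseAgent instance:

    from google.adk.tools.agent_tool import AgentTool

    tool = AgentTool(agent=my_agent, skip_summarization=True)
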
google/adk/tools/google_api_tool/google_api_tool_set.py CHANGED
@@ -11,10 +11,12 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
+
+ from __future__ import annotations
+
  import inspect
  import os
  from typing import Any
- from typing import Dict
  from typing import Final
  from typing import List
  from typing import Optional
@@ -28,6 +30,7 @@ from .googleapi_to_openapi_converter import GoogleApiToOpenApiConverter
 
 
  class GoogleApiToolSet:
+ """Google API Tool Set."""
 
  def __init__(self, tools: List[RestApiTool]):
  self.tools: Final[List[GoogleApiTool]] = [
@@ -45,10 +48,10 @@ class GoogleApiToolSet:
 
  @staticmethod
  def _load_tool_set_with_oidc_auth(
- spec_file: str = None,
- spec_dict: Dict[str, Any] = None,
- scopes: list[str] = None,
- ) -> Optional[OpenAPIToolset]:
+ spec_file: Optional[str] = None,
+ spec_dict: Optional[dict[str, Any]] = None,
+ scopes: Optional[list[str]] = None,
+ ) -> OpenAPIToolset:
  spec_str = None
  if spec_file:
  # Get the frame of the caller
@@ -90,18 +93,18 @@ class GoogleApiToolSet:
 
  @classmethod
  def load_tool_set(
- cl: Type['GoogleApiToolSet'],
+ cls: Type[GoogleApiToolSet],
  api_name: str,
  api_version: str,
- ) -> 'GoogleApiToolSet':
+ ) -> GoogleApiToolSet:
  spec_dict = GoogleApiToOpenApiConverter(api_name, api_version).convert()
  scope = list(
  spec_dict['components']['securitySchemes']['oauth2']['flows'][
  'authorizationCode'
  ]['scopes'].keys()
  )[0]
- return cl(
- cl._load_tool_set_with_oidc_auth(
+ return cls(
+ cls._load_tool_set_with_oidc_auth(
  spec_dict=spec_dict, scopes=[scope]
  ).get_tools()
  )
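
Note: with cl renamed to the conventional cls and the quoted annotations dropped (enabled by the new `from __future__ import annotations`), usage is unchanged. A hypothetical call, assuming "calendar"/"v3" is a valid API name/version pair for the converter:

    from google.adk.tools.google_api_tool.google_api_tool_set import GoogleApiToolSet

    calendar_tools = GoogleApiToolSet.load_tool_set("calendar", "v3")
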
google/adk/tools/load_artifacts_tool.py CHANGED
@@ -89,7 +89,7 @@ class LoadArtifactsTool(BaseTool):
  than the function call.
  """])
 
- # Attache the content of the artifacts if the model requests them.
+ # Attach the content of the artifacts if the model requests them.
  # This only adds the content to the model request, instead of the session.
  if llm_request.contents and llm_request.contents[-1].parts:
  function_response = llm_request.contents[-1].parts[0].function_response
google/adk/tools/openapi_tool/auth/credential_exchangers/oauth2_exchanger.py CHANGED
@@ -66,10 +66,10 @@ class OAuth2CredentialExchanger(BaseAuthCredentialExchanger):
 
  Returns:
  An AuthCredential object containing the HTTP bearer access token. If the
- HTTO bearer token cannot be generated, return the origianl credential
+ HTTP bearer token cannot be generated, return the original credential.
  """
 
- if "access_token" not in auth_credential.oauth2.token:
+ if not auth_credential.oauth2.access_token:
  return auth_credential
 
  # Return the access token as a bearer token.
@@ -78,7 +78,7 @@ class OAuth2CredentialExchanger(BaseAuthCredentialExchanger):
  http=HttpAuth(
  scheme="bearer",
  credentials=HttpCredentials(
- token=auth_credential.oauth2.token["access_token"]
+ token=auth_credential.oauth2.access_token
  ),
  ),
  )
@@ -111,7 +111,7 @@ class OAuth2CredentialExchanger(BaseAuthCredentialExchanger):
  return auth_credential
 
  # If access token is exchanged, exchange a HTTPBearer token.
- if auth_credential.oauth2.token:
+ if auth_credential.oauth2.access_token:
  return self.generate_auth_token(auth_credential)
 
  return None
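
Note: the exchanger now reads a typed access_token attribute instead of indexing a raw token dict, so a missing token is a plain falsy check. A sketch with a hypothetical stand-in for the OAuth2 credential model:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class OAuth2Stub:  # hypothetical stand-in for the real OAuth2 model
        access_token: Optional[str] = None

    cred = OAuth2Stub()
    # Replaces `"access_token" not in oauth2.token`; None short-circuits cleanly:
    if not cred.access_token:
        pass  # the original credential would be returned unchanged
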
google/adk/tools/openapi_tool/openapi_spec_parser/openapi_toolset.py CHANGED
@@ -124,7 +124,7 @@ class OpenAPIToolset:
  def _load_spec(
  self, spec_str: str, spec_type: Literal["json", "yaml"]
  ) -> Dict[str, Any]:
- """Loads the OpenAPI spec string into adictionary."""
+ """Loads the OpenAPI spec string into a dictionary."""
  if spec_type == "json":
  return json.loads(spec_str)
  elif spec_type == "yaml":
google/adk/tools/openapi_tool/openapi_spec_parser/operation_parser.py CHANGED
@@ -14,20 +14,12 @@
 
  import inspect
  from textwrap import dedent
- from typing import Any
- from typing import Dict
- from typing import List
- from typing import Optional
- from typing import Union
+ from typing import Any, Dict, List, Optional, Union
 
  from fastapi.encoders import jsonable_encoder
- from fastapi.openapi.models import Operation
- from fastapi.openapi.models import Parameter
- from fastapi.openapi.models import Schema
+ from fastapi.openapi.models import Operation, Parameter, Schema
 
- from ..common.common import ApiParameter
- from ..common.common import PydocHelper
- from ..common.common import to_snake_case
+ from ..common.common import ApiParameter, PydocHelper, to_snake_case
 
 
  class OperationParser:
@@ -110,7 +102,8 @@ class OperationParser:
  description = request_body.description or ''
 
  if schema and schema.type == 'object':
- for prop_name, prop_details in schema.properties.items():
+ properties = schema.properties or {}
+ for prop_name, prop_details in properties.items():
  self.params.append(
  ApiParameter(
  original_name=prop_name,
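
Note: the `or {}` fallback means an object request body that declares no properties contributes no parameters instead of crashing on `None.items()`. In essence:

    schema_properties = None  # schema.properties may be absent in the spec
    for prop_name, prop_details in (schema_properties or {}).items():
        raise AssertionError("never reached: the loop is simply skipped")
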
google/adk/tools/openapi_tool/openapi_spec_parser/rest_api_tool.py CHANGED
@@ -17,6 +17,7 @@ from typing import Dict
  from typing import List
  from typing import Literal
  from typing import Optional
+ from typing import Sequence
  from typing import Tuple
  from typing import Union
 
@@ -59,6 +60,40 @@ def snake_to_lower_camel(snake_case_string: str):
  ])
 
 
+ # TODO: Switch to Gemini `from_json_schema` util when it is released
+ # in Gemini SDK.
+ def normalize_json_schema_type(
+ json_schema_type: Optional[Union[str, Sequence[str]]],
+ ) -> tuple[Optional[str], bool]:
+ """Converts a JSON Schema Type into Gemini Schema type.
+
+ Adopted and modified from Gemini SDK. This gets the first available schema
+ type from JSON Schema, and use it to mark Gemini schema type. If JSON Schema
+ contains a list of types, the first non null type is used.
+
+ Remove this after switching to Gemini `from_json_schema`.
+ """
+ if json_schema_type is None:
+ return None, False
+ if isinstance(json_schema_type, str):
+ if json_schema_type == "null":
+ return None, True
+ return json_schema_type, False
+
+ non_null_types = []
+ nullable = False
+ # If json schema type is an array, pick the first non null type.
+ for type_value in json_schema_type:
+ if type_value == "null":
+ nullable = True
+ else:
+ non_null_types.append(type_value)
+ non_null_type = non_null_types[0] if non_null_types else None
+ return non_null_type, nullable
+
+
+ # TODO: Switch to Gemini `from_json_schema` util when it is released
+ # in Gemini SDK.
  def to_gemini_schema(openapi_schema: Optional[Dict[str, Any]] = None) -> Schema:
  """Converts an OpenAPI schema dictionary to a Gemini Schema object.
 
@@ -82,13 +117,6 @@ def to_gemini_schema(openapi_schema: Optional[Dict[str, Any]] = None) -> Schema:
  if not openapi_schema.get("type"):
  openapi_schema["type"] = "object"
 
- # Adding this to avoid "properties: should be non-empty for OBJECT type" error
- # See b/385165182
- if openapi_schema.get("type", "") == "object" and not openapi_schema.get(
- "properties"
- ):
- openapi_schema["properties"] = {"dummy_DO_NOT_GENERATE": {"type": "string"}}
-
  for key, value in openapi_schema.items():
  snake_case_key = to_snake_case(key)
  # Check if the snake_case_key exists in the Schema model's fields.
@@ -99,7 +127,17 @@ def to_gemini_schema(openapi_schema: Optional[Dict[str, Any]] = None) -> Schema:
  # Format: properties[expiration].format: only 'enum' and 'date-time' are
  # supported for STRING type
  continue
- if snake_case_key == "properties" and isinstance(value, dict):
+ elif snake_case_key == "type":
+ schema_type, nullable = normalize_json_schema_type(
+ openapi_schema.get("type", None)
+ )
+ # Adding this to force adding a type to an empty dict
+ # This avoid "... one_of or any_of must specify a type" error
+ pydantic_schema_data["type"] = schema_type if schema_type else "object"
+ pydantic_schema_data["type"] = pydantic_schema_data["type"].upper()
+ if nullable:
+ pydantic_schema_data["nullable"] = True
+ elif snake_case_key == "properties" and isinstance(value, dict):
  pydantic_schema_data[snake_case_key] = {
  k: to_gemini_schema(v) for k, v in value.items()
  }
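
Note: worked examples of normalize_json_schema_type as defined above; it returns the first non-null JSON Schema type plus a nullability flag, which the new `type` branch then upper-cases into the Gemini type enum (falling back to "object" when no type survives):

    from google.adk.tools.openapi_tool.openapi_spec_parser.rest_api_tool import (
        normalize_json_schema_type,
    )

    assert normalize_json_schema_type(None) == (None, False)
    assert normalize_json_schema_type("string") == ("string", False)
    assert normalize_json_schema_type("null") == (None, True)  # nullable, no type
    assert normalize_json_schema_type(["null", "integer"]) == ("integer", True)
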
google/adk/version.py CHANGED
@@ -13,4 +13,4 @@
  # limitations under the License.
 
  # version: date+base_cl
- __version__ = "0.2.0"
+ __version__ = "0.3.0"