google-adk 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/adk/agents/base_agent.py +4 -4
- google/adk/agents/invocation_context.py +1 -1
- google/adk/agents/remote_agent.py +1 -1
- google/adk/agents/run_config.py +1 -1
- google/adk/auth/auth_preprocessor.py +2 -2
- google/adk/auth/auth_tool.py +1 -1
- google/adk/cli/browser/index.html +2 -2
- google/adk/cli/browser/{main-SLIAU2JL.js → main-ZBO76GRM.js} +65 -81
- google/adk/cli/cli_create.py +279 -0
- google/adk/cli/cli_deploy.py +4 -0
- google/adk/cli/cli_eval.py +2 -2
- google/adk/cli/cli_tools_click.py +67 -7
- google/adk/cli/fast_api.py +51 -16
- google/adk/cli/utils/envs.py +0 -3
- google/adk/cli/utils/evals.py +2 -2
- google/adk/evaluation/evaluation_generator.py +4 -4
- google/adk/evaluation/response_evaluator.py +15 -3
- google/adk/events/event.py +3 -3
- google/adk/flows/llm_flows/_nl_planning.py +10 -4
- google/adk/flows/llm_flows/contents.py +1 -1
- google/adk/models/lite_llm.py +51 -34
- google/adk/planners/plan_re_act_planner.py +2 -2
- google/adk/runners.py +1 -1
- google/adk/sessions/database_session_service.py +84 -23
- google/adk/sessions/state.py +1 -1
- google/adk/telemetry.py +2 -2
- google/adk/tools/application_integration_tool/clients/integration_client.py +3 -2
- google/adk/tools/base_tool.py +1 -1
- google/adk/tools/function_parameter_parse_util.py +2 -2
- google/adk/tools/google_api_tool/__init__.py +74 -1
- google/adk/tools/google_api_tool/google_api_tool_sets.py +91 -34
- google/adk/tools/google_api_tool/googleapi_to_openapi_converter.py +3 -1
- google/adk/tools/load_memory_tool.py +25 -2
- google/adk/tools/mcp_tool/mcp_session_manager.py +176 -0
- google/adk/tools/mcp_tool/mcp_tool.py +15 -2
- google/adk/tools/mcp_tool/mcp_toolset.py +31 -37
- google/adk/tools/openapi_tool/openapi_spec_parser/rest_api_tool.py +1 -1
- google/adk/tools/toolbox_tool.py +1 -1
- google/adk/version.py +1 -1
- google_adk-0.2.0.dist-info/METADATA +212 -0
- {google_adk-0.1.0.dist-info → google_adk-0.2.0.dist-info}/RECORD +44 -42
- google_adk-0.1.0.dist-info/METADATA +0 -160
- {google_adk-0.1.0.dist-info → google_adk-0.2.0.dist-info}/WHEEL +0 -0
- {google_adk-0.1.0.dist-info → google_adk-0.2.0.dist-info}/entry_points.txt +0 -0
- {google_adk-0.1.0.dist-info → google_adk-0.2.0.dist-info}/licenses/LICENSE +0 -0
google/adk/evaluation/evaluation_generator.py
CHANGED
@@ -42,10 +42,10 @@ class EvaluationGenerator:
     """Returns evaluation responses for the given dataset and agent.
 
     Args:
-      eval_dataset: The dataset that needs to be scraped for
+      eval_dataset: The dataset that needs to be scraped for responses.
       agent_module_path: Path to the module that contains the root agent.
      repeat_num: Number of time the eval dataset should be repeated. This is
-        usually done to remove
+        usually done to remove uncertainty that a single run may bring.
       agent_name: The name of the agent that should be evaluated. This is
         usually the sub-agent.
       initial_session: Initial session for the eval data.
@@ -253,8 +253,8 @@ class EvaluationGenerator:
       all_mock_tools: set[str],
   ):
     """Recursively apply the before_tool_callback to the root agent and all its subagents."""
-    #
-    # We use function
+    # Check if the agent has tools that are defined by evalset.
+    # We use function names to check if tools match
     if not isinstance(agent, Agent) and not isinstance(agent, LlmAgent):
       return
 
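For context, the callback wiring this hunk documents follows a common pattern: walk the agent tree and attach the callback wherever a tool's function name matches one mocked by the evalset. A minimal standalone sketch of that pattern (SimpleAgent and walk_agent_tree are hypothetical names, not ADK code):

# Standalone sketch: recursively attach a before-tool callback to every agent
# whose tools overlap, by function name, with the evalset's mocked tools.
from dataclasses import dataclass, field
from typing import Callable, Optional


@dataclass
class SimpleAgent:
  name: str
  tools: list = field(default_factory=list)
  sub_agents: list = field(default_factory=list)
  before_tool_callback: Optional[Callable] = None


def walk_agent_tree(agent: SimpleAgent, mock_tool_names: set, callback: Callable):
  # Match tools by their function names, mirroring the comment in the diff.
  if any(tool.__name__ in mock_tool_names for tool in agent.tools):
    agent.before_tool_callback = callback
  for sub_agent in agent.sub_agents:
    walk_agent_tree(sub_agent, mock_tool_names, callback)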
google/adk/evaluation/response_evaluator.py
CHANGED
@@ -42,7 +42,7 @@ class ResponseEvaluator:
 
   A note on evaluation_criteria:
     `response_match_score`: This metric compares the agents final natural
-      language
+      language response with the expected final response, stored in the
       "reference" field in test/eval files. We use Rouge metric to compare the
       two responses.
 
@@ -106,9 +106,11 @@ class ResponseEvaluator:
     eval_dataset = pd.DataFrame(flattened_queries).rename(
         columns={"query": "prompt", "expected_tool_use": "reference_trajectory"}
     )
-    eval_task = EvalTask(dataset=eval_dataset, metrics=metrics)
 
-    eval_result =
+    eval_result = ResponseEvaluator._perform_eval(
+        dataset=eval_dataset, metrics=metrics
+    )
+
     if print_detailed_results:
       ResponseEvaluator._print_results(eval_result)
     return eval_result.summary_metrics
@@ -129,6 +131,16 @@ class ResponseEvaluator:
       metrics.append("rouge_1")
     return metrics
 
+  @staticmethod
+  def _perform_eval(dataset, metrics):
+    """This method hides away the call to external service.
+
+    Primarily helps with unit testing.
+    """
+    eval_task = EvalTask(dataset=dataset, metrics=metrics)
+
+    return eval_task.evaluate()
+
   @staticmethod
   def _print_results(eval_result):
     print("Evaluation Summary Metrics:", eval_result.summary_metrics)
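The new _perform_eval static method is a test seam: it isolates the call to the external evaluation service so unit tests can stub it out. A minimal standalone sketch of how such a seam is typically exercised (Evaluator here is a hypothetical stand-in, not the ADK class):

# Standalone sketch: stubbing an external-service call behind a static method.
from unittest import mock


class Evaluator:

  @staticmethod
  def _perform_eval(dataset, metrics):
    raise RuntimeError("would call the external evaluation service")

  @classmethod
  def evaluate(cls, dataset, metrics):
    return cls._perform_eval(dataset, metrics)


def test_evaluate_uses_stubbed_service():
  fake_result = {"rouge_1": 0.9}
  with mock.patch.object(Evaluator, "_perform_eval", return_value=fake_result):
    assert Evaluator.evaluate(dataset=[], metrics=["rouge_1"]) == fake_result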
google/adk/events/event.py
CHANGED
@@ -70,7 +70,7 @@ class Event(LlmResponse):
   agent_2, and agent_2 is the parent of agent_3.
 
   Branch is used when multiple sub-agent shouldn't see their peer agents'
-
+  conversation history.
   """
 
   # The following are computed fields.
@@ -94,7 +94,7 @@ class Event(LlmResponse):
         not self.get_function_calls()
         and not self.get_function_responses()
         and not self.partial
-        and not self.
+        and not self.has_trailing_code_execution_result()
     )
 
   def get_function_calls(self) -> list[types.FunctionCall]:
@@ -115,7 +115,7 @@ class Event(LlmResponse):
          func_response.append(part.function_response)
     return func_response
 
-  def
+  def has_trailing_code_execution_result(
       self,
  ) -> bool:
     """Returns whether the event has a trailing code execution result."""
google/adk/flows/llm_flows/_nl_planning.py
CHANGED
@@ -87,15 +87,21 @@ class _NlPlanningResponse(BaseLlmResponseProcessor):
       return
 
     # Postprocess the LLM response.
+    callback_context = CallbackContext(invocation_context)
     processed_parts = planner.process_planning_response(
-
+        callback_context, llm_response.content.parts
     )
     if processed_parts:
       llm_response.content.parts = processed_parts
 
-
-
-
+    if callback_context.state.has_delta():
+      state_update_event = Event(
+          invocation_id=invocation_context.invocation_id,
+          author=invocation_context.agent.name,
+          branch=invocation_context.branch,
+          actions=callback_context._event_actions,
+      )
+      yield state_update_event
 
 
 response_processor = _NlPlanningResponse()
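The added has_delta() check means a state-update event is emitted only when the planner's callback actually changed state. A minimal standalone sketch of that guard (hypothetical names, not ADK code):

# Standalone sketch: only yield a state-update record when a delta exists.
from typing import Any, Iterator


class SketchContext:

  def __init__(self) -> None:
    self.state_delta: dict = {}

  def has_delta(self) -> bool:
    return bool(self.state_delta)


def postprocess(ctx: SketchContext, invocation_id: str) -> Iterator[dict]:
  # ... response postprocessing would happen here ...
  if ctx.has_delta():
    yield {"invocation_id": invocation_id, "state_delta": dict(ctx.state_delta)}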
google/adk/flows/llm_flows/contents.py
CHANGED
@@ -310,7 +310,7 @@ def _merge_function_response_events(
     function_response_events: A list of function_response events.
       NOTE: function_response_events must fulfill these requirements: 1. The
       list is in increasing order of timestamp; 2. the first event is the
-      initial
+      initial function_response event; 3. all later events should contain at
       least one function_response part that related to the function_call
       event. (Note, 3. may not be true when aync function return some
       intermediate response, there could also be some intermediate model
google/adk/models/lite_llm.py
CHANGED
@@ -136,54 +136,68 @@ def _safe_json_serialize(obj) -> str:
 
 def _content_to_message_param(
     content: types.Content,
-) -> Message:
-  """Converts a types.Content to a litellm Message.
+) -> Union[Message, list[Message]]:
+  """Converts a types.Content to a litellm Message or list of Messages.
+
+  Handles multipart function responses by returning a list of
+  ChatCompletionToolMessage objects if multiple function_response parts exist.
 
   Args:
     content: The content to convert.
 
   Returns:
-
+    A litellm Message, a list of litellm Messages.
   """
 
-
-
-
-
-
-
-
-
+  tool_messages = []
+  for part in content.parts:
+    if part.function_response:
+      tool_messages.append(
+          ChatCompletionToolMessage(
+              role="tool",
+              tool_call_id=part.function_response.id,
+              content=_safe_json_serialize(part.function_response.response),
+          )
+      )
+  if tool_messages:
+    return tool_messages if len(tool_messages) > 1 else tool_messages[0]
 
+  # Handle user or assistant messages
   role = _to_litellm_role(content.role)
+  message_content = _get_content(content.parts) or None
 
   if role == "user":
-    return ChatCompletionUserMessage(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    return ChatCompletionUserMessage(role="user", content=message_content)
+  else:  # assistant/model
+    tool_calls = []
+    content_present = False
+    for part in content.parts:
+      if part.function_call:
+        tool_calls.append(
+            ChatCompletionMessageToolCall(
+                type="function",
+                id=part.function_call.id,
+                function=Function(
+                    name=part.function_call.name,
+                    arguments=part.function_call.args,
+                ),
+            )
+        )
+      elif part.text or part.inline_data:
+        content_present = True
+
+    final_content = message_content if content_present else None
 
   return ChatCompletionAssistantMessage(
       role=role,
-      content=
+      content=final_content,
       tool_calls=tool_calls or None,
   )
 
 
-def _get_content(
+def _get_content(
+    parts: Iterable[types.Part],
+) -> Union[OpenAIMessageContent, str]:
   """Converts a list of parts to litellm content.
 
   Args:
@@ -435,10 +449,13 @@ def _get_completion_inputs(
   Returns:
     The litellm inputs (message list and tool dictionary).
   """
-  messages = [
-
-
-
+  messages = []
+  for content in llm_request.contents or []:
+    message_param_or_list = _content_to_message_param(content)
+    if isinstance(message_param_or_list, list):
+      messages.extend(message_param_or_list)
+    elif message_param_or_list:  # Ensure it's not None before appending
+      messages.append(message_param_or_list)
 
   if llm_request.config.system_instruction:
     messages.insert(
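Because _content_to_message_param can now return either one message or a list of tool messages, the caller in _get_completion_inputs flattens both cases. A minimal standalone sketch of the same single-or-list handling (plain dict messages here, not the litellm types used above):

# Standalone sketch: a converter that may return one message or a list, and a
# caller that flattens both cases with extend/append.
from typing import Union


def to_message_param(content: dict) -> Union[dict, list, None]:
  responses = content.get("function_responses", [])
  if responses:
    tool_msgs = [{"role": "tool", "content": r} for r in responses]
    return tool_msgs if len(tool_msgs) > 1 else tool_msgs[0]
  return {"role": content.get("role", "user"), "content": content.get("text")}


def build_messages(contents: list) -> list:
  messages = []
  for content in contents:
    param = to_message_param(content)
    if isinstance(param, list):
      messages.extend(param)
    elif param:  # skip None
      messages.append(param)
  return messages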
google/adk/planners/plan_re_act_planner.py
CHANGED
@@ -31,9 +31,9 @@ FINAL_ANSWER_TAG = '/*FINAL_ANSWER*/'
 
 
 class PlanReActPlanner(BasePlanner):
-  """Plan-Re-Act planner that
+  """Plan-Re-Act planner that constrains the LLM response to generate a plan before any action/observation.
 
-  Note: this planner does not require the model to support
+  Note: this planner does not require the model to support built-in thinking
   features or setting the thinking config.
   """
 
google/adk/runners.py
CHANGED
@@ -108,7 +108,7 @@ class Runner:
     """Runs the agent.
 
     NOTE: This sync interface is only for local testing and convenience purpose.
-    Consider
+    Consider using `run_async` for production usage.
 
     Args:
       user_id: The user ID of the session.
google/adk/sessions/database_session_service.py
CHANGED
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import base64
 import copy
 from datetime import datetime
 import json
@@ -20,15 +21,16 @@ from typing import Any
 from typing import Optional
 import uuid
 
+from sqlalchemy import Boolean
 from sqlalchemy import delete
 from sqlalchemy import Dialect
 from sqlalchemy import ForeignKeyConstraint
 from sqlalchemy import func
-from sqlalchemy import select
 from sqlalchemy import Text
 from sqlalchemy.dialects import postgresql
 from sqlalchemy.engine import create_engine
 from sqlalchemy.engine import Engine
+from sqlalchemy.exc import ArgumentError
 from sqlalchemy.ext.mutable import MutableDict
 from sqlalchemy.inspection import inspect
 from sqlalchemy.orm import DeclarativeBase
@@ -53,6 +55,7 @@ from .base_session_service import ListSessionsResponse
 from .session import Session
 from .state import State
 
+
 logger = logging.getLogger(__name__)
 
 
@@ -102,7 +105,7 @@ class StorageSession(Base):
       String, primary_key=True, default=lambda: str(uuid.uuid4())
   )
 
-  state: Mapped[
+  state: Mapped[MutableDict[str, Any]] = mapped_column(
       MutableDict.as_mutable(DynamicJSON), default={}
   )
 
@@ -133,8 +136,20 @@ class StorageEvent(Base):
   author: Mapped[str] = mapped_column(String)
   branch: Mapped[str] = mapped_column(String, nullable=True)
   timestamp: Mapped[DateTime] = mapped_column(DateTime(), default=func.now())
-  content: Mapped[dict] = mapped_column(DynamicJSON)
-  actions: Mapped[
+  content: Mapped[dict[str, Any]] = mapped_column(DynamicJSON)
+  actions: Mapped[MutableDict[str, Any]] = mapped_column(PickleType)
+
+  long_running_tool_ids_json: Mapped[Optional[str]] = mapped_column(
+      Text, nullable=True
+  )
+  grounding_metadata: Mapped[dict[str, Any]] = mapped_column(
+      DynamicJSON, nullable=True
+  )
+  partial: Mapped[bool] = mapped_column(Boolean, nullable=True)
+  turn_complete: Mapped[bool] = mapped_column(Boolean, nullable=True)
+  error_code: Mapped[str] = mapped_column(String, nullable=True)
+  error_message: Mapped[str] = mapped_column(String, nullable=True)
+  interrupted: Mapped[bool] = mapped_column(Boolean, nullable=True)
 
   storage_session: Mapped[StorageSession] = relationship(
       "StorageSession",
@@ -149,13 +164,28 @@ class StorageEvent(Base):
       ),
   )
 
+  @property
+  def long_running_tool_ids(self) -> set[str]:
+    return (
+        set(json.loads(self.long_running_tool_ids_json))
+        if self.long_running_tool_ids_json
+        else set()
+    )
+
+  @long_running_tool_ids.setter
+  def long_running_tool_ids(self, value: set[str]):
+    if value is None:
+      self.long_running_tool_ids_json = None
+    else:
+      self.long_running_tool_ids_json = json.dumps(list(value))
+
 
 class StorageAppState(Base):
   """Represents an app state stored in the database."""
   __tablename__ = "app_states"
 
   app_name: Mapped[str] = mapped_column(String, primary_key=True)
-  state: Mapped[
+  state: Mapped[MutableDict[str, Any]] = mapped_column(
       MutableDict.as_mutable(DynamicJSON), default={}
   )
   update_time: Mapped[DateTime] = mapped_column(
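The long_running_tool_ids property/setter pair added above stores a set[str] as JSON text in a single column. A minimal standalone sketch of the same pattern on a plain class (no SQLAlchemy; EventRecord is a hypothetical name):

# Standalone sketch: expose a set[str] attribute backed by a JSON text field.
import json
from typing import Optional


class EventRecord:
  long_running_tool_ids_json: Optional[str] = None

  @property
  def long_running_tool_ids(self) -> set:
    if not self.long_running_tool_ids_json:
      return set()
    return set(json.loads(self.long_running_tool_ids_json))

  @long_running_tool_ids.setter
  def long_running_tool_ids(self, value: Optional[set]) -> None:
    # Sorted for a stable serialized form; None clears the column.
    self.long_running_tool_ids_json = (
        json.dumps(sorted(value)) if value is not None else None
    )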
@@ -169,7 +199,7 @@ class StorageUserState(Base):
 
   app_name: Mapped[str] = mapped_column(String, primary_key=True)
   user_id: Mapped[str] = mapped_column(String, primary_key=True)
-  state: Mapped[
+  state: Mapped[MutableDict[str, Any]] = mapped_column(
       MutableDict.as_mutable(DynamicJSON), default={}
   )
   update_time: Mapped[DateTime] = mapped_column(
@@ -189,13 +219,20 @@ class DatabaseSessionService(BaseSessionService):
     # 2. Create all tables based on schema
     # 3. Initialize all properies
 
-
-    dialect = db_url.split("://")[0]
-
-    if dialect in supported_dialects:
+    try:
       db_engine = create_engine(db_url)
-
-
+    except Exception as e:
+      if isinstance(e, ArgumentError):
+        raise ValueError(
+            f"Invalid database URL format or argument '{db_url}'."
+        ) from e
+      if isinstance(e, ImportError):
+        raise ValueError(
+            f"Database related module not found for URL '{db_url}'."
+        ) from e
+      raise ValueError(
+          f"Failed to create database engine for URL '{db_url}'"
+      ) from e
 
     # Get the local timezone
     local_timezone = get_localzone()
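The constructor now wraps create_engine and re-raises low-level failures as ValueError with clearer messages. A minimal standalone sketch of the same wrapping (assumes SQLAlchemy is installed; make_engine is a hypothetical helper name, not ADK API):

# Standalone sketch: map engine-creation failures to a single ValueError.
from sqlalchemy import create_engine
from sqlalchemy.exc import ArgumentError


def make_engine(db_url: str):
  try:
    return create_engine(db_url)
  except ArgumentError as e:
    # Malformed URL or bad engine argument.
    raise ValueError(f"Invalid database URL format or argument '{db_url}'.") from e
  except ImportError as e:
    # The DBAPI driver for this dialect is not installed.
    raise ValueError(f"Database related module not found for URL '{db_url}'.") from e
  except Exception as e:
    raise ValueError(f"Failed to create database engine for URL '{db_url}'") from e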
@@ -287,7 +324,6 @@ class DatabaseSessionService(BaseSessionService):
           last_update_time=storage_session.update_time.timestamp(),
       )
       return session
-      return None
 
   @override
   def get_session(
@@ -301,7 +337,6 @@ class DatabaseSessionService(BaseSessionService):
     # 1. Get the storage session entry from session table
     # 2. Get all the events based on session id and filtering config
     # 3. Convert and return the session
-    session: Session = None
     with self.DatabaseSessionFactory() as sessionFactory:
       storage_session = sessionFactory.get(
           StorageSession, (app_name, user_id, session_id)
@@ -348,13 +383,19 @@ class DatabaseSessionService(BaseSessionService):
               author=e.author,
               branch=e.branch,
               invocation_id=e.invocation_id,
-              content=e.content,
+              content=_decode_content(e.content),
               actions=e.actions,
               timestamp=e.timestamp.timestamp(),
+              long_running_tool_ids=e.long_running_tool_ids,
+              grounding_metadata=e.grounding_metadata,
+              partial=e.partial,
+              turn_complete=e.turn_complete,
+              error_code=e.error_code,
+              error_message=e.error_message,
+              interrupted=e.interrupted,
           )
           for e in storage_events
       ]
-
       return session
 
   @override
@@ -379,7 +420,6 @@ class DatabaseSessionService(BaseSessionService):
         )
         sessions.append(session)
       return ListSessionsResponse(sessions=sessions)
-    raise ValueError("Failed to retrieve sessions.")
 
   @override
   def delete_session(
@@ -398,7 +438,7 @@ class DatabaseSessionService(BaseSessionService):
   def append_event(self, session: Session, event: Event) -> Event:
     logger.info(f"Append event: {event} to session {session.id}")
 
-    if event.partial
+    if event.partial:
       return event
 
     # 1. Check if timestamp is stale
@@ -447,19 +487,34 @@ class DatabaseSessionService(BaseSessionService):
       storage_user_state.state = user_state
       storage_session.state = session_state
 
-      encoded_content = event.content.model_dump(exclude_none=True)
       storage_event = StorageEvent(
          id=event.id,
          invocation_id=event.invocation_id,
          author=event.author,
          branch=event.branch,
-          content=encoded_content,
          actions=event.actions,
          session_id=session.id,
          app_name=session.app_name,
          user_id=session.user_id,
          timestamp=datetime.fromtimestamp(event.timestamp),
+          long_running_tool_ids=event.long_running_tool_ids,
+          grounding_metadata=event.grounding_metadata,
+          partial=event.partial,
+          turn_complete=event.turn_complete,
+          error_code=event.error_code,
+          error_message=event.error_message,
+          interrupted=event.interrupted,
       )
+      if event.content:
+        encoded_content = event.content.model_dump(exclude_none=True)
+        # Workaround for multimodal Content throwing JSON not serializable
+        # error with SQLAlchemy.
+        for p in encoded_content["parts"]:
+          if "inline_data" in p:
+            p["inline_data"]["data"] = (
+                base64.b64encode(p["inline_data"]["data"]).decode("utf-8"),
+            )
+        storage_event.content = encoded_content
 
       sessionFactory.add(storage_event)
 
@@ -481,8 +536,7 @@ class DatabaseSessionService(BaseSessionService):
       user_id: str,
       session_id: str,
   ) -> ListEventsResponse:
-
-
+    raise NotImplementedError()
 
 def convert_event(event: StorageEvent) -> Event:
   """Converts a storage event to an event."""
@@ -497,7 +551,7 @@ def convert_event(event: StorageEvent) -> Event:
   )
 
 
-def _extract_state_delta(state: dict):
+def _extract_state_delta(state: dict[str, Any]):
   app_state_delta = {}
   user_state_delta = {}
   session_state_delta = {}
@@ -520,3 +574,10 @@ def _merge_state(app_state, user_state, session_state):
   for key in user_state.keys():
     merged_state[State.USER_PREFIX + key] = user_state[key]
   return merged_state
+
+
+def _decode_content(content: dict[str, Any]) -> dict[str, Any]:
+  for p in content["parts"]:
+    if "inline_data" in p:
+      p["inline_data"]["data"] = base64.b64decode(p["inline_data"]["data"][0])
+  return content
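append_event and _decode_content together form a base64 round-trip so raw inline_data bytes survive JSON serialization; the encode side wraps the string in a 1-tuple (note the trailing comma) and the decode side indexes [0]. A minimal standalone sketch of that round-trip with a made-up part dict:

# Standalone sketch: base64 round-trip for bytes inside a JSON-serialized dict.
import base64
import json

part = {"inline_data": {"mime_type": "image/png", "data": b"\x89PNG..."}}

# Encode before storing: bytes -> base64 str, wrapped in a 1-tuple.
part["inline_data"]["data"] = (
    base64.b64encode(part["inline_data"]["data"]).decode("utf-8"),
)
stored = json.dumps(part)  # now JSON-serializable

# Decode after loading: take element 0, then base64-decode back to bytes.
loaded = json.loads(stored)
loaded["inline_data"]["data"] = base64.b64decode(loaded["inline_data"]["data"][0])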
google/adk/sessions/state.py
CHANGED
@@ -49,7 +49,7 @@ class State:
     return key in self._value or key in self._delta
 
   def has_delta(self) -> bool:
-    """Whether the state has pending
+    """Whether the state has pending delta."""
     return bool(self._delta)
 
   def get(self, key: str, default: Any = None) -> Any:
google/adk/telemetry.py
CHANGED
@@ -16,8 +16,8 @@
 #
 # We expect that the underlying GenAI SDK will provide a certain
 # level of tracing and logging telemetry aligned with Open Telemetry
-# Semantic Conventions (such as logging prompts,
-# properties, etc.) and so the information that is recorded by the
+# Semantic Conventions (such as logging prompts, responses,
+# request properties, etc.) and so the information that is recorded by the
 # Agent Development Kit should be focused on the higher-level
 # constructs of the framework that are not observable by the SDK.
 
google/adk/tools/application_integration_tool/clients/integration_client.py
CHANGED
@@ -196,11 +196,12 @@ class IntegrationClient:
         action_details = connections_client.get_action_schema(action)
         input_schema = action_details["inputSchema"]
         output_schema = action_details["outputSchema"]
-
+        # Remove spaces from the display name to generate valid spec
+        action_display_name = action_details["displayName"].replace(" ", "")
         operation = "EXECUTE_ACTION"
         if action == "ExecuteCustomQuery":
           connector_spec["components"]["schemas"][
-              f"{
+              f"{action_display_name}_Request"
           ] = connections_client.execute_custom_query_request()
           operation = "EXECUTE_QUERY"
         else:
google/adk/tools/base_tool.py
CHANGED
@@ -53,7 +53,7 @@ def _raise_for_any_of_if_mldev(schema: types.Schema):
 
 def _update_for_default_if_mldev(schema: types.Schema):
   if schema.default is not None:
-    # TODO(kech): Remove this
+    # TODO(kech): Remove this workaround once mldev supports default value.
     schema.default = None
     logger.warning(
         'Default value is not supported in function declaration schema for'
@@ -291,7 +291,7 @@ def _parse_schema_from_parameter(
     return schema
   raise ValueError(
       f'Failed to parse the parameter {param} of function {func_name} for'
-      ' automatic function calling.Automatic function calling works best with'
+      ' automatic function calling. Automatic function calling works best with'
       ' simpler function signature schema,consider manually parse your'
       f' function declaration for function {func_name}.'
   )
google/adk/tools/google_api_tool/__init__.py
CHANGED
@@ -11,4 +11,77 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+__all__ = [
+    'bigquery_tool_set',
+    'calendar_tool_set',
+    'gmail_tool_set',
+    'youtube_tool_set',
+    'slides_tool_set',
+    'sheets_tool_set',
+    'docs_tool_set',
+]
+
+# Nothing is imported here automatically
+# Each tool set will only be imported when accessed
+
+_bigquery_tool_set = None
+_calendar_tool_set = None
+_gmail_tool_set = None
+_youtube_tool_set = None
+_slides_tool_set = None
+_sheets_tool_set = None
+_docs_tool_set = None
+
+
+def __getattr__(name):
+  global _bigquery_tool_set, _calendar_tool_set, _gmail_tool_set, _youtube_tool_set, _slides_tool_set, _sheets_tool_set, _docs_tool_set
+
+  match name:
+    case 'bigquery_tool_set':
+      if _bigquery_tool_set is None:
+        from .google_api_tool_sets import bigquery_tool_set as bigquery
+
+        _bigquery_tool_set = bigquery
+      return _bigquery_tool_set
+
+    case 'calendar_tool_set':
+      if _calendar_tool_set is None:
+        from .google_api_tool_sets import calendar_tool_set as calendar
+
+        _calendar_tool_set = calendar
+      return _calendar_tool_set
+
+    case 'gmail_tool_set':
+      if _gmail_tool_set is None:
+        from .google_api_tool_sets import gmail_tool_set as gmail
+
+        _gmail_tool_set = gmail
+      return _gmail_tool_set
+
+    case 'youtube_tool_set':
+      if _youtube_tool_set is None:
+        from .google_api_tool_sets import youtube_tool_set as youtube
+
+        _youtube_tool_set = youtube
+      return _youtube_tool_set
+
+    case 'slides_tool_set':
+      if _slides_tool_set is None:
+        from .google_api_tool_sets import slides_tool_set as slides
+
+        _slides_tool_set = slides
+      return _slides_tool_set
+
+    case 'sheets_tool_set':
+      if _sheets_tool_set is None:
+        from .google_api_tool_sets import sheets_tool_set as sheets
+
+        _sheets_tool_set = sheets
+      return _sheets_tool_set
+
+    case 'docs_tool_set':
+      if _docs_tool_set is None:
+        from .google_api_tool_sets import docs_tool_set as docs
+
+        _docs_tool_set = docs
+      return _docs_tool_set