google-adk 1.5.0__py3-none-any.whl → 1.6.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- google/adk/a2a/converters/event_converter.py +257 -36
- google/adk/a2a/converters/part_converter.py +93 -25
- google/adk/a2a/converters/request_converter.py +12 -32
- google/adk/a2a/converters/utils.py +22 -4
- google/adk/a2a/executor/__init__.py +13 -0
- google/adk/a2a/executor/a2a_agent_executor.py +260 -0
- google/adk/a2a/executor/task_result_aggregator.py +71 -0
- google/adk/a2a/logs/__init__.py +13 -0
- google/adk/a2a/logs/log_utils.py +349 -0
- google/adk/agents/base_agent.py +54 -0
- google/adk/agents/llm_agent.py +15 -0
- google/adk/agents/remote_a2a_agent.py +532 -0
- google/adk/artifacts/in_memory_artifact_service.py +6 -3
- google/adk/cli/browser/chunk-EQDQRRRY.js +1 -0
- google/adk/cli/browser/chunk-TXJFAAIW.js +2 -0
- google/adk/cli/browser/index.html +4 -3
- google/adk/cli/browser/main-RXDVX3K6.js +3914 -0
- google/adk/cli/browser/polyfills-FFHMD2TL.js +17 -0
- google/adk/cli/cli_deploy.py +4 -1
- google/adk/cli/cli_eval.py +8 -6
- google/adk/cli/cli_tools_click.py +30 -10
- google/adk/cli/fast_api.py +120 -5
- google/adk/cli/utils/agent_loader.py +12 -0
- google/adk/evaluation/agent_evaluator.py +107 -10
- google/adk/evaluation/base_eval_service.py +157 -0
- google/adk/evaluation/constants.py +20 -0
- google/adk/evaluation/eval_case.py +3 -3
- google/adk/evaluation/eval_metrics.py +39 -0
- google/adk/evaluation/evaluation_generator.py +1 -1
- google/adk/evaluation/final_response_match_v2.py +230 -0
- google/adk/evaluation/llm_as_judge.py +141 -0
- google/adk/evaluation/llm_as_judge_utils.py +48 -0
- google/adk/evaluation/metric_evaluator_registry.py +89 -0
- google/adk/evaluation/response_evaluator.py +38 -211
- google/adk/evaluation/safety_evaluator.py +54 -0
- google/adk/evaluation/trajectory_evaluator.py +16 -2
- google/adk/evaluation/vertex_ai_eval_facade.py +147 -0
- google/adk/events/event.py +2 -4
- google/adk/flows/llm_flows/base_llm_flow.py +2 -0
- google/adk/memory/in_memory_memory_service.py +3 -2
- google/adk/models/lite_llm.py +50 -10
- google/adk/runners.py +27 -10
- google/adk/sessions/database_session_service.py +25 -7
- google/adk/sessions/in_memory_session_service.py +5 -1
- google/adk/sessions/vertex_ai_session_service.py +67 -42
- google/adk/tools/bigquery/config.py +11 -1
- google/adk/tools/bigquery/query_tool.py +306 -12
- google/adk/tools/enterprise_search_tool.py +2 -2
- google/adk/tools/function_tool.py +7 -1
- google/adk/tools/google_search_tool.py +1 -1
- google/adk/tools/mcp_tool/mcp_session_manager.py +44 -30
- google/adk/tools/mcp_tool/mcp_tool.py +44 -7
- google/adk/version.py +1 -1
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/METADATA +6 -4
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/RECORD +58 -42
- google/adk/cli/browser/main-JAAWEV7F.js +0 -92
- google/adk/cli/browser/polyfills-B6TNHZQ6.js +0 -17
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/WHEEL +0 -0
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/entry_points.txt +0 -0
- {google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/licenses/LICENSE +0 -0
google/adk/tools/bigquery/query_tool.py CHANGED

```diff
@@ -20,10 +20,12 @@ from google.auth.credentials import Credentials
 from google.cloud import bigquery
 
 from . import client
+from ..tool_context import ToolContext
 from .config import BigQueryToolConfig
 from .config import WriteMode
 
 MAX_DOWNLOADED_QUERY_RESULT_ROWS = 50
+BIGQUERY_SESSION_INFO_KEY = "bigquery_session_info"
 
 
 def execute_sql(
```
```diff
@@ -31,14 +33,17 @@ def execute_sql(
     query: str,
     credentials: Credentials,
     config: BigQueryToolConfig,
+    tool_context: ToolContext,
 ) -> dict:
-  """Run a BigQuery SQL query in the project and return the result.
+  """Run a BigQuery or BigQuery ML SQL query in the project and return the result.
 
   Args:
       project_id (str): The GCP project id in which the query should be
        executed.
      query (str): The BigQuery SQL query to be executed.
      credentials (Credentials): The credentials to use for the request.
+     config (BigQueryToolConfig): The configuration for the tool.
+     tool_context (ToolContext): The context for the tool.
 
   Returns:
       dict: Dictionary representing the result of the query.
```
```diff
@@ -49,11 +54,11 @@ def execute_sql(
   Examples:
       Fetch data or insights from a table:
 
-          >>> execute_sql("
+          >>> execute_sql("my_project",
           ... "SELECT island, COUNT(*) AS population "
           ... "FROM bigquery-public-data.ml_datasets.penguins GROUP BY island")
           {
-            "status": "
+            "status": "SUCCESS",
             "rows": [
                 {
                     "island": "Dream",
```
```diff
@@ -72,23 +77,87 @@ def execute_sql(
   """
 
   try:
+    # Get BigQuery client
     bq_client = client.get_bigquery_client(
         project=project_id, credentials=credentials
     )
+
+    # BigQuery connection properties where applicable
+    bq_connection_properties = None
+
     if not config or config.write_mode == WriteMode.BLOCKED:
-
+      dry_run_query_job = bq_client.query(
           query,
           project=project_id,
           job_config=bigquery.QueryJobConfig(dry_run=True),
       )
-      if
+      if dry_run_query_job.statement_type != "SELECT":
         return {
             "status": "ERROR",
             "error_details": "Read-only mode only supports SELECT statements.",
         }
+    elif config.write_mode == WriteMode.PROTECTED:
+      # In protected write mode, write operation only to a temporary artifact is
+      # allowed. This artifact must have been created in a BigQuery session. In
+      # such a scenario the session info (session id and the anonymous dataset
+      # containing the artifact) is persisted in the tool context.
+      bq_session_info = tool_context.state.get(BIGQUERY_SESSION_INFO_KEY, None)
+      if bq_session_info:
+        bq_session_id, bq_session_dataset_id = bq_session_info
+      else:
+        session_creator_job = bq_client.query(
+            "SELECT 1",
+            project=project_id,
+            job_config=bigquery.QueryJobConfig(
+                dry_run=True, create_session=True
+            ),
+        )
+        bq_session_id = session_creator_job.session_info.session_id
+        bq_session_dataset_id = session_creator_job.destination.dataset_id
+
+        # Remember the BigQuery session info for subsequent queries
+        tool_context.state[BIGQUERY_SESSION_INFO_KEY] = (
+            bq_session_id,
+            bq_session_dataset_id,
+        )
+
+      # Session connection property will be set in the query execution
+      bq_connection_properties = [
+          bigquery.ConnectionProperty("session_id", bq_session_id)
+      ]
+
+      # Check the query type w.r.t. the BigQuery session
+      dry_run_query_job = bq_client.query(
+          query,
+          project=project_id,
+          job_config=bigquery.QueryJobConfig(
+              dry_run=True,
+              connection_properties=bq_connection_properties,
+          ),
+      )
+      if (
+          dry_run_query_job.statement_type != "SELECT"
+          and dry_run_query_job.destination.dataset_id != bq_session_dataset_id
+      ):
+        return {
+            "status": "ERROR",
+            "error_details": (
+                "Protected write mode only supports SELECT statements, or write"
+                " operations in the anonymous dataset of a BigQuery session."
+            ),
+        }
 
+    # Finally execute the query and fetch the result
+    job_config = (
+        bigquery.QueryJobConfig(connection_properties=bq_connection_properties)
+        if bq_connection_properties
+        else None
+    )
     row_iterator = bq_client.query_and_wait(
-        query,
+        query,
+        job_config=job_config,
+        project=project_id,
+        max_results=MAX_DOWNLOADED_QUERY_RESULT_ROWS,
     )
     rows = [{key: val for key, val in row.items()} for row in row_iterator]
     result = {"status": "SUCCESS", "rows": rows}
```
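For context, the new PROTECTED path is built on BigQuery sessions: a dry-run query with `create_session=True` opens a session, and later statements join it through a `session_id` connection property so temporary artifacts stay visible across calls. A minimal standalone sketch of that pattern, assuming a recent `google-cloud-bigquery`, application-default credentials, and a placeholder project id:

```python
from google.cloud import bigquery

# "my_project" is a placeholder, not a real project.
bq_client = bigquery.Client(project="my_project")

# 1) Open a session the way the tool does: a dry-run query with
#    create_session=True yields session info without executing anything.
creator = bq_client.query(
    "SELECT 1",
    job_config=bigquery.QueryJobConfig(dry_run=True, create_session=True),
)
session_id = creator.session_info.session_id

# 2) Subsequent statements join the session via a connection property,
#    so TEMP tables live in the session's anonymous dataset.
session_config = bigquery.QueryJobConfig(
    connection_properties=[
        bigquery.ConnectionProperty("session_id", session_id)
    ]
)
bq_client.query_and_wait(
    "CREATE TEMP TABLE t AS SELECT 1 AS x", job_config=session_config
)
```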
```diff
@@ -106,9 +175,29 @@ def execute_sql(
 
 
 _execute_sql_write_examples = """
+      Create a table with schema prescribed:
+
+          >>> execute_sql("my_project",
+          ... "CREATE TABLE my_project.my_dataset.my_table "
+          ... "(island STRING, population INT64)")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
+      Insert data into an existing table:
+
+          >>> execute_sql("my_project",
+          ... "INSERT INTO my_project.my_dataset.my_table (island, population) "
+          ... "VALUES ('Dream', 124), ('Biscoe', 168)")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
       Create a table from the result of a query:
 
-          >>> execute_sql("
+          >>> execute_sql("my_project",
           ... "CREATE TABLE my_project.my_dataset.my_table AS "
           ... "SELECT island, COUNT(*) AS population "
           ... "FROM bigquery-public-data.ml_datasets.penguins GROUP BY island")
```
```diff
@@ -119,7 +208,7 @@ _execute_sql_write_examples = """
 
       Delete a table:
 
-          >>> execute_sql("
+          >>> execute_sql("my_project",
           ... "DROP TABLE my_project.my_dataset.my_table")
           {
             "status": "SUCCESS",
```
```diff
@@ -128,7 +217,7 @@ _execute_sql_write_examples = """
 
       Copy a table to another table:
 
-          >>> execute_sql("
+          >>> execute_sql("my_project",
           ... "CREATE TABLE my_project.my_dataset.my_table_clone "
           ... "CLONE my_project.my_dataset.my_table")
           {
```
```diff
@@ -139,7 +228,7 @@ _execute_sql_write_examples = """
       Create a snapshot (a lightweight, read-optimized copy) of an existing
       table:
 
-          >>> execute_sql("
+          >>> execute_sql("my_project",
           ... "CREATE SNAPSHOT TABLE my_project.my_dataset.my_table_snapshot "
           ... "CLONE my_project.my_dataset.my_table")
           {
```
```diff
@@ -147,12 +236,214 @@ _execute_sql_write_examples = """
             "rows": []
           }
 
+      Create a BigQuery ML linear regression model:
+
+          >>> execute_sql("my_project",
+          ... "CREATE MODEL `my_dataset.my_model` "
+          ... "OPTIONS (model_type='linear_reg', input_label_cols=['body_mass_g']) AS "
+          ... "SELECT * FROM `bigquery-public-data.ml_datasets.penguins` "
+          ... "WHERE body_mass_g IS NOT NULL")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
+      Evaluate BigQuery ML model:
+
+          >>> execute_sql("my_project",
+          ... "SELECT * FROM ML.EVALUATE(MODEL `my_dataset.my_model`)")
+          {
+            "status": "SUCCESS",
+            "rows": [{'mean_absolute_error': 227.01223667447218,
+                      'mean_squared_error': 81838.15989216768,
+                      'mean_squared_log_error': 0.0050704473735013,
+                      'median_absolute_error': 173.08081641661738,
+                      'r2_score': 0.8723772534253441,
+                      'explained_variance': 0.8723772534253442}]
+          }
+
+      Evaluate BigQuery ML model on custom data:
+
+          >>> execute_sql("my_project",
+          ... "SELECT * FROM ML.EVALUATE(MODEL `my_dataset.my_model`, "
+          ... "(SELECT * FROM `my_dataset.my_table`))")
+          {
+            "status": "SUCCESS",
+            "rows": [{'mean_absolute_error': 227.01223667447218,
+                      'mean_squared_error': 81838.15989216768,
+                      'mean_squared_log_error': 0.0050704473735013,
+                      'median_absolute_error': 173.08081641661738,
+                      'r2_score': 0.8723772534253441,
+                      'explained_variance': 0.8723772534253442}]
+          }
+
+      Predict using BigQuery ML model:
+
+          >>> execute_sql("my_project",
+          ... "SELECT * FROM ML.PREDICT(MODEL `my_dataset.my_model`, "
+          ... "(SELECT * FROM `my_dataset.my_table`))")
+          {
+            "status": "SUCCESS",
+            "rows": [
+                {
+                  "predicted_body_mass_g": "3380.9271650847013",
+                  ...
+                }, {
+                  "predicted_body_mass_g": "3873.6072435386004",
+                  ...
+                },
+                ...
+            ]
+          }
+
+      Delete a BigQuery ML model:
+
+          >>> execute_sql("my_project", "DROP MODEL `my_dataset.my_model`")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
   Notes:
     - If a destination table already exists, there are a few ways to overwrite
     it:
         - Use "CREATE OR REPLACE TABLE" instead of "CREATE TABLE".
         - First run "DROP TABLE", followed by "CREATE TABLE".
-    -
+    - If a model already exists, there are a few ways to overwrite it:
+        - Use "CREATE OR REPLACE MODEL" instead of "CREATE MODEL".
+        - First run "DROP MODEL", followed by "CREATE MODEL".
+"""
+
+
+_execute_sql_protecetd_write_examples = """
+      Create a temporary table with schema prescribed:
+
+          >>> execute_sql("my_project",
+          ... "CREATE TEMP TABLE my_table (island STRING, population INT64)")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
+      Insert data into an existing temporary table:
+
+          >>> execute_sql("my_project",
+          ... "INSERT INTO my_table (island, population) "
+          ... "VALUES ('Dream', 124), ('Biscoe', 168)")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
+      Create a temporary table from the result of a query:
+
+          >>> execute_sql("my_project",
+          ... "CREATE TEMP TABLE my_table AS "
+          ... "SELECT island, COUNT(*) AS population "
+          ... "FROM bigquery-public-data.ml_datasets.penguins GROUP BY island")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
+      Delete a temporary table:
+
+          >>> execute_sql("my_project", "DROP TABLE my_table")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
+      Copy a temporary table to another temporary table:
+
+          >>> execute_sql("my_project",
+          ... "CREATE TEMP TABLE my_table_clone CLONE my_table")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
+      Create a temporary BigQuery ML linear regression model:
+
+          >>> execute_sql("my_project",
+          ... "CREATE TEMP MODEL my_model "
+          ... "OPTIONS (model_type='linear_reg', input_label_cols=['body_mass_g']) AS"
+          ... "SELECT * FROM `bigquery-public-data.ml_datasets.penguins` "
+          ... "WHERE body_mass_g IS NOT NULL")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
+      Evaluate BigQuery ML model:
+
+          >>> execute_sql("my_project", "SELECT * FROM ML.EVALUATE(MODEL my_model)")
+          {
+            "status": "SUCCESS",
+            "rows": [{'mean_absolute_error': 227.01223667447218,
+                      'mean_squared_error': 81838.15989216768,
+                      'mean_squared_log_error': 0.0050704473735013,
+                      'median_absolute_error': 173.08081641661738,
+                      'r2_score': 0.8723772534253441,
+                      'explained_variance': 0.8723772534253442}]
+          }
+
+      Evaluate BigQuery ML model on custom data:
+
+          >>> execute_sql("my_project",
+          ... "SELECT * FROM ML.EVALUATE(MODEL my_model, "
+          ... "(SELECT * FROM `my_dataset.my_table`))")
+          {
+            "status": "SUCCESS",
+            "rows": [{'mean_absolute_error': 227.01223667447218,
+                      'mean_squared_error': 81838.15989216768,
+                      'mean_squared_log_error': 0.0050704473735013,
+                      'median_absolute_error': 173.08081641661738,
+                      'r2_score': 0.8723772534253441,
+                      'explained_variance': 0.8723772534253442}]
+          }
+
+      Predict using BigQuery ML model:
+
+          >>> execute_sql("my_project",
+          ... "SELECT * FROM ML.PREDICT(MODEL my_model, "
+          ... "(SELECT * FROM `my_dataset.my_table`))")
+          {
+            "status": "SUCCESS",
+            "rows": [
+                {
+                  "predicted_body_mass_g": "3380.9271650847013",
+                  ...
+                }, {
+                  "predicted_body_mass_g": "3873.6072435386004",
+                  ...
+                },
+                ...
+            ]
+          }
+
+      Delete a BigQuery ML model:
+
+          >>> execute_sql("my_project", "DROP MODEL my_model")
+          {
+            "status": "SUCCESS",
+            "rows": []
+          }
+
+  Notes:
+    - If a destination table already exists, there are a few ways to overwrite
+    it:
+        - Use "CREATE OR REPLACE TEMP TABLE" instead of "CREATE TEMP TABLE".
+        - First run "DROP TABLE", followed by "CREATE TEMP TABLE".
+    - Only temporary tables can be created, inserted into or deleted. Please
+    do not try creating a permanent table (non-TEMP table), inserting into or
+    deleting one.
+    - If a destination model already exists, there are a few ways to overwrite
+    it:
+        - Use "CREATE OR REPLACE TEMP MODEL" instead of "CREATE TEMP MODEL".
+        - First run "DROP MODEL", followed by "CREATE TEMP MODEL".
+    - Only temporary models can be created or deleted. Please do not try
+    creating a permanent model (non-TEMP model) or deleting one.
 """
 
 
```
```diff
@@ -189,6 +480,9 @@ def get_execute_sql(config: BigQueryToolConfig) -> Callable[..., dict]:
   functools.update_wrapper(execute_sql_wrapper, execute_sql)
 
   # Now, set the new docstring
-
+  if config.write_mode == WriteMode.PROTECTED:
+    execute_sql_wrapper.__doc__ += _execute_sql_protecetd_write_examples
+  else:
+    execute_sql_wrapper.__doc__ += _execute_sql_write_examples
 
   return execute_sql_wrapper
```
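The docstring selection above is plain `functools` machinery: copy the wrapped function's metadata, then append mode-specific examples to `__doc__`. A minimal standalone sketch of that pattern with stand-in names (not the library's code):

```python
import functools

def base(x):
  """Base tool docs."""
  return x

_extra_examples = "\n  Extra examples for the protected write mode."

def make_wrapper(protected: bool):
  def wrapper(x):
    return base(x)
  # Copy name and docstring from the wrapped function, then extend the docs.
  functools.update_wrapper(wrapper, base)
  wrapper.__doc__ += _extra_examples if protected else "\n  Default examples."
  return wrapper

print(make_wrapper(True).__doc__)
```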
google/adk/tools/enterprise_search_tool.py CHANGED

```diff
@@ -47,8 +47,8 @@ class EnterpriseWebSearchTool(BaseTool):
       tool_context: ToolContext,
       llm_request: LlmRequest,
   ) -> None:
-    if llm_request.model and
-      if
+    if llm_request.model and 'gemini-' in llm_request.model:
+      if 'gemini-1' in llm_request.model and llm_request.config.tools:
         raise ValueError(
             'Enterprise web search tool can not be used with other tools in'
             ' Gemini 1.x.'
```
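The repaired guard reads in two steps: any Gemini model passes the outer check, but Gemini 1.x combined with other tools is rejected. The same logic in isolation, with hypothetical stand-in values:

```python
model = "gemini-1.5-pro"   # stand-in for llm_request.model
other_tools = ["another"]  # stand-in for llm_request.config.tools

if model and 'gemini-' in model:            # any Gemini model
  if 'gemini-1' in model and other_tools:   # but 1.x cannot mix tools
    print('rejected: no other tools with Gemini 1.x')
```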
google/adk/tools/function_tool.py CHANGED

```diff
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import inspect
 from typing import Any
 from typing import Callable
```
```diff
@@ -79,9 +81,13 @@ class FunctionTool(BaseTool):
   ) -> Any:
     args_to_call = args.copy()
     signature = inspect.signature(self.func)
-
+    valid_params = {param for param in signature.parameters}
+    if 'tool_context' in valid_params:
       args_to_call['tool_context'] = tool_context
 
+    # Filter args_to_call to only include valid parameters for the function
+    args_to_call = {k: v for k, v in args_to_call.items() if k in valid_params}
+
     # Before invoking the function, we check for if the list of args passed in
     # has all the mandatory arguments or not.
     # If the check fails, then we don't invoke the tool and let the Agent know
```
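The change makes `FunctionTool` drop arguments the target function does not declare, using `inspect.signature`. A standalone sketch of that filtering idea, with a hypothetical `fn` and `args`:

```python
import inspect

def fn(a, b, tool_context=None):  # hypothetical tool function
  return a + b

args = {"a": 1, "b": 2, "c": 3}   # "c" is not a parameter of fn
valid_params = set(inspect.signature(fn).parameters)
args_to_call = {k: v for k, v in args.items() if k in valid_params}

assert args_to_call == {"a": 1, "b": 2}
print(fn(**args_to_call))  # 3
```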
google/adk/tools/google_search_tool.py CHANGED

```diff
@@ -54,7 +54,7 @@ class GoogleSearchTool(BaseTool):
       llm_request.config.tools.append(
           types.Tool(google_search_retrieval=types.GoogleSearchRetrieval())
       )
-    elif llm_request.model and 'gemini-
+    elif llm_request.model and 'gemini-' in llm_request.model:
       llm_request.config.tools.append(
           types.Tool(google_search=types.GoogleSearch())
       )
```
google/adk/tools/mcp_tool/mcp_session_manager.py CHANGED

```diff
@@ -251,6 +251,49 @@ class MCPSessionManager:
     """
     return session._read_stream._closed or session._write_stream._closed
 
+  def _create_client(self, merged_headers: Optional[Dict[str, str]] = None):
+    """Creates an MCP client based on the connection parameters.
+
+    Args:
+      merged_headers: Optional headers to include in the connection.
+        Only applicable for SSE and StreamableHTTP connections.
+
+    Returns:
+      The appropriate MCP client instance.
+
+    Raises:
+      ValueError: If the connection parameters are not supported.
+    """
+    if isinstance(self._connection_params, StdioConnectionParams):
+      client = stdio_client(
+          server=self._connection_params.server_params,
+          errlog=self._errlog,
+      )
+    elif isinstance(self._connection_params, SseConnectionParams):
+      client = sse_client(
+          url=self._connection_params.url,
+          headers=merged_headers,
+          timeout=self._connection_params.timeout,
+          sse_read_timeout=self._connection_params.sse_read_timeout,
+      )
+    elif isinstance(self._connection_params, StreamableHTTPConnectionParams):
+      client = streamablehttp_client(
+          url=self._connection_params.url,
+          headers=merged_headers,
+          timeout=timedelta(seconds=self._connection_params.timeout),
+          sse_read_timeout=timedelta(
+              seconds=self._connection_params.sse_read_timeout
+          ),
+          terminate_on_close=self._connection_params.terminate_on_close,
+      )
+    else:
+      raise ValueError(
+          'Unable to initialize connection. Connection should be'
+          ' StdioServerParameters or SseServerParams, but got'
+          f' {self._connection_params}'
+      )
+    return client
+
   async def create_session(
       self, headers: Optional[Dict[str, str]] = None
   ) -> ClientSession:
```
```diff
@@ -298,36 +341,7 @@ class MCPSessionManager:
     exit_stack = AsyncExitStack()
 
     try:
-
-        client = stdio_client(
-            server=self._connection_params.server_params,
-            errlog=self._errlog,
-        )
-      elif isinstance(self._connection_params, SseConnectionParams):
-        client = sse_client(
-            url=self._connection_params.url,
-            headers=merged_headers,
-            timeout=self._connection_params.timeout,
-            sse_read_timeout=self._connection_params.sse_read_timeout,
-        )
-      elif isinstance(
-          self._connection_params, StreamableHTTPConnectionParams
-      ):
-        client = streamablehttp_client(
-            url=self._connection_params.url,
-            headers=merged_headers,
-            timeout=timedelta(seconds=self._connection_params.timeout),
-            sse_read_timeout=timedelta(
-                seconds=self._connection_params.sse_read_timeout
-            ),
-            terminate_on_close=self._connection_params.terminate_on_close,
-        )
-      else:
-        raise ValueError(
-            'Unable to initialize connection. Connection should be'
-            ' StdioServerParameters or SseServerParams, but got'
-            f' {self._connection_params}'
-        )
+      client = self._create_client(merged_headers)
 
       transports = await exit_stack.enter_async_context(client)
       # The streamable http client returns a GetSessionCallback in addition to the read/write MemoryObjectStreams
```
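The refactor moves transport selection into the `_create_client` factory, so `create_session` only has to enter the returned context manager on its `AsyncExitStack`. A generic sketch of that shape, with stand-in types rather than the ADK classes:

```python
import asyncio
from contextlib import AsyncExitStack, asynccontextmanager

@asynccontextmanager
async def make_client(kind: str):
  # Stand-in for the factory: pick one of stdio / sse / streamable-http
  # in a single place, based on the connection parameters.
  yield f"{kind}-transport"

async def create_session(kind: str):
  # create_session now just enters whatever the factory returned.
  stack = AsyncExitStack()
  transport = await stack.enter_async_context(make_client(kind))
  return transport, stack

async def main():
  transport, stack = await create_session("sse")
  print(transport)       # sse-transport
  await stack.aclose()   # caller owns teardown

asyncio.run(main())
```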
google/adk/tools/mcp_tool/mcp_tool.py CHANGED

```diff
@@ -15,12 +15,11 @@
 from __future__ import annotations
 
 import base64
-import json
 import logging
 from typing import Optional
 
+from fastapi.openapi.models import APIKeyIn
 from google.genai.types import FunctionDeclaration
-from google.oauth2.credentials import Credentials
 from typing_extensions import override
 
 from .._gemini_schema_util import _to_gemini_schema
```
```diff
@@ -58,6 +57,9 @@ class MCPTool(BaseAuthenticatedTool):
 
   Internally, the tool initializes from a MCP Tool, and uses the MCP Session to
   call the tool.
+
+  Note: For API key authentication, only header-based API keys are supported.
+  Query and cookie-based API keys will result in authentication errors.
   """
 
   def __init__(
```
```diff
@@ -134,7 +136,19 @@ class MCPTool(BaseAuthenticatedTool):
   async def _get_headers(
       self, tool_context: ToolContext, credential: AuthCredential
   ) -> Optional[dict[str, str]]:
-    headers
+    """Extracts authentication headers from credentials.
+
+    Args:
+      tool_context: The tool context of the current invocation.
+      credential: The authentication credential to process.
+
+    Returns:
+      Dictionary of headers to add to the request, or None if no auth.
+
+    Raises:
+      ValueError: If API key authentication is configured for non-header location.
+    """
+    headers: Optional[dict[str, str]] = None
     if credential:
       if credential.oauth2:
         headers = {"Authorization": f"Bearer {credential.oauth2.access_token}"}
```
```diff
@@ -167,10 +181,33 @@ class MCPTool(BaseAuthenticatedTool):
             )
         }
       elif credential.api_key:
-
-
-
-
+        if (
+            not self._credentials_manager
+            or not self._credentials_manager._auth_config
+        ):
+          error_msg = (
+              "Cannot find corresponding auth scheme for API key credential"
+              f" {credential}"
+          )
+          logger.error(error_msg)
+          raise ValueError(error_msg)
+        elif (
+            self._credentials_manager._auth_config.auth_scheme.in_
+            != APIKeyIn.header
+        ):
+          error_msg = (
+              "MCPTool only supports header-based API key authentication."
+              " Configured location:"
+              f" {self._credentials_manager._auth_config.auth_scheme.in_}"
+          )
+          logger.error(error_msg)
+          raise ValueError(error_msg)
+        else:
+          headers = {
+              self._credentials_manager._auth_config.auth_scheme.name: (
+                  credential.api_key
+              )
+          }
       elif credential.service_account:
         # Service accounts should be exchanged for access tokens before reaching this point
         logger.warning(
```
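The new API-key branch enforces header placement using `APIKeyIn` from `fastapi.openapi.models`. A reduced sketch of that rule, with a hypothetical helper and header name:

```python
from fastapi.openapi.models import APIKeyIn

def api_key_headers(location: APIKeyIn, name: str, key: str) -> dict:
  # Mirrors the new rule: anything but header placement is rejected.
  if location != APIKeyIn.header:
    raise ValueError(
        "MCPTool only supports header-based API key authentication."
        f" Configured location: {location}"
    )
  return {name: key}

print(api_key_headers(APIKeyIn.header, "X-API-Key", "secret-key"))
```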
google/adk/version.py CHANGED

```diff
-__version__ = "1.5.0"
+__version__ = "1.6.1"
```

{google_adk-1.5.0.dist-info → google_adk-1.6.1.dist-info}/METADATA CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-adk
-Version: 1.5.0
+Version: 1.6.1
 Summary: Agent Development Kit
 Author-email: Google LLC <googleapis-packages@google.com>
 Requires-Python: >=3.9
```
```diff
@@ -41,9 +41,11 @@ Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: requests>=2.32.4
 Requires-Dist: sqlalchemy>=2.0
 Requires-Dist: starlette>=0.46.2
+Requires-Dist: tenacity>=8.0.0
 Requires-Dist: typing-extensions>=4.5, <5
 Requires-Dist: tzlocal>=5.3
 Requires-Dist: uvicorn>=0.34.0
+Requires-Dist: watchdog>=6.0.0
 Requires-Dist: websockets>=15.0.1
 Requires-Dist: a2a-sdk>=0.2.7 ; extra == "a2a" and (python_version>='3.10')
 Requires-Dist: flit>=3.10.0 ; extra == "dev"
```
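One way to confirm the new runtime pins once 1.6.1 is installed is to read them back from package metadata. A small check, assuming an environment with `google-adk` 1.6.1 present:

```python
from importlib.metadata import requires, version

print(version("google-adk"))  # expected: 1.6.1
new_pins = [
    r for r in (requires("google-adk") or [])
    if r.startswith(("tenacity", "watchdog"))
]
print(new_pins)  # expected: ['tenacity>=8.0.0', 'watchdog>=6.0.0']
```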
```diff
@@ -57,7 +59,7 @@ Requires-Dist: myst-parser ; extra == "docs"
 Requires-Dist: sphinx ; extra == "docs"
 Requires-Dist: sphinx-autodoc-typehints ; extra == "docs"
 Requires-Dist: sphinx-rtd-theme ; extra == "docs"
-Requires-Dist: google-cloud-aiplatform[evaluation]>=1.
+Requires-Dist: google-cloud-aiplatform[evaluation]>=1.100.0 ; extra == "eval"
 Requires-Dist: pandas>=2.2.3 ; extra == "eval"
 Requires-Dist: tabulate>=0.9.0 ; extra == "eval"
 Requires-Dist: rouge-score>=0.1.2 ; extra == "eval"
```
```diff
@@ -72,7 +74,7 @@ Requires-Dist: lxml>=5.3.0 ; extra == "extensions"
 Requires-Dist: toolbox-core>=0.1.0 ; extra == "extensions"
 Requires-Dist: anthropic>=0.43.0 ; extra == "test"
 Requires-Dist: langchain-community>=0.3.17 ; extra == "test"
-Requires-Dist: langgraph>=0.2.60 ; extra == "test"
+Requires-Dist: langgraph>=0.2.60, <= 0.4.10 ; extra == "test"
 Requires-Dist: litellm>=1.71.2 ; extra == "test"
 Requires-Dist: llama-index-readers-file>=0.4.0 ; extra == "test"
 Requires-Dist: pytest-asyncio>=0.25.0 ; extra == "test"
```
```diff
@@ -137,7 +139,7 @@ Agent Development Kit (ADK) is a flexible and modular framework for developing a
 
 For remote agent-to-agent communication, ADK integrates with the
 [A2A protocol](https://github.com/google-a2a/A2A/).
-See this [example](https://github.com/
+See this [example](https://github.com/a2aproject/a2a-samples/tree/main/samples/python/agents)
 for how they can work together.
 
 ## 🚀 Installation
```