kagent-adk 0.7.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kagent/adk/__init__.py +8 -0
- kagent/adk/_a2a.py +178 -0
- kagent/adk/_agent_executor.py +335 -0
- kagent/adk/_lifespan.py +36 -0
- kagent/adk/_session_service.py +178 -0
- kagent/adk/_token.py +80 -0
- kagent/adk/artifacts/__init__.py +13 -0
- kagent/adk/artifacts/artifacts_toolset.py +56 -0
- kagent/adk/artifacts/return_artifacts_tool.py +160 -0
- kagent/adk/artifacts/session_path.py +106 -0
- kagent/adk/artifacts/stage_artifacts_tool.py +170 -0
- kagent/adk/cli.py +249 -0
- kagent/adk/converters/__init__.py +0 -0
- kagent/adk/converters/error_mappings.py +60 -0
- kagent/adk/converters/event_converter.py +322 -0
- kagent/adk/converters/part_converter.py +206 -0
- kagent/adk/converters/request_converter.py +35 -0
- kagent/adk/models/__init__.py +3 -0
- kagent/adk/models/_openai.py +564 -0
- kagent/adk/models/_ssl.py +245 -0
- kagent/adk/sandbox_code_executer.py +77 -0
- kagent/adk/skill_fetcher.py +103 -0
- kagent/adk/tools/README.md +217 -0
- kagent/adk/tools/__init__.py +15 -0
- kagent/adk/tools/bash_tool.py +74 -0
- kagent/adk/tools/file_tools.py +192 -0
- kagent/adk/tools/skill_tool.py +104 -0
- kagent/adk/tools/skills_plugin.py +49 -0
- kagent/adk/tools/skills_toolset.py +68 -0
- kagent/adk/types.py +268 -0
- kagent_adk-0.7.11.dist-info/METADATA +35 -0
- kagent_adk-0.7.11.dist-info/RECORD +34 -0
- kagent_adk-0.7.11.dist-info/WHEEL +4 -0
- kagent_adk-0.7.11.dist-info/entry_points.txt +2 -0
--- /dev/null
+++ b/kagent/adk/models/_ssl.py
@@ -0,0 +1,245 @@
+"""SSL/TLS utilities for configuring httpx clients with custom certificates."""
+
+import logging
+import ssl
+from datetime import datetime, timezone
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+
+
+def get_ssl_troubleshooting_message(
+    error: Exception, ca_cert_path: str | None = None, server_url: str | None = None
+) -> str:
+    """Generate an actionable troubleshooting message for SSL errors.
+
+    Args:
+        error: The original SSL error.
+        ca_cert_path: Path to the custom CA certificate, if one was configured.
+        server_url: URL of the server that was being accessed.
+
+    Returns:
+        Formatted troubleshooting message with specific debugging steps.
+    """
+    troubleshooting_steps = [
+        "\n" + "=" * 70,
+        "SSL/TLS Connection Error",
+        "=" * 70,
+        f"Error: {error}",
+        "",
+        "Troubleshooting Steps:",
+        "",
+    ]
+
+    if ca_cert_path:
+        troubleshooting_steps.extend(
+            [
+                "1. Verify the CA certificate is correctly mounted:",
+                f"   kubectl exec <pod-name> -- cat {ca_cert_path}",
+                "",
+                "2. Inspect the certificate details:",
+                f"   kubectl exec <pod-name> -- openssl x509 -in {ca_cert_path} -text -noout",
+                "",
+                "3. Check the certificate validity period:",
+                f"   kubectl exec <pod-name> -- openssl x509 -in {ca_cert_path} -noout -dates",
+                "",
+            ]
+        )
+
+    if server_url:
+        troubleshooting_steps.extend(
+            [
+                "4. Test the server certificate chain:",
+                f"   openssl s_client -connect {server_url} -showcerts",
+                "",
+                "5. Verify the server certificate is signed by your CA:",
+                f"   openssl s_client -connect {server_url} -CAfile {ca_cert_path or '<ca-file>'} -verify 5",
+                "",
+            ]
+        )
+
+    troubleshooting_steps.extend(
+        [
+            "6. Check Kubernetes Secret contents:",
+            "   kubectl get secret <secret-name> -o yaml",
+            "   # Verify the certificate data is base64-encoded PEM format",
+            "",
+            "7. Verify the ModelConfig TLS configuration:",
+            "   kubectl get modelconfig <name> -o yaml",
+            "   # Check spec.tls.caCertSecretRef and spec.tls.caCertSecretKey",
+            "",
+            "For more information, see:",
+            "  https://kagent.dev/docs",
+            "=" * 70,
+        ]
+    )
+
+    return "\n".join(troubleshooting_steps)
+
+
+def validate_certificate(cert_path: str) -> None:
+    """Validate certificate format and log metadata (warnings only, non-blocking).
+
+    This function attempts to parse the certificate file and log useful metadata,
+    including the subject, serial number, and validity period. Validation issues are
+    logged as warnings but do not prevent the certificate from being loaded.
+
+    Args:
+        cert_path: Path to the certificate file in PEM format.
+
+    Note:
+        This function requires the 'cryptography' library. If it is not available,
+        validation is skipped with an info log message.
+    """
+    try:
+        from cryptography import x509
+        from cryptography.hazmat.backends import default_backend
+    except ImportError:
+        logger.info(
+            "cryptography library not available - skipping certificate validation. "
+            "Install with: pip install cryptography"
+        )
+        return
+
+    try:
+        with open(cert_path, "rb") as f:
+            cert_data = f.read()
+        cert = x509.load_pem_x509_certificate(cert_data, default_backend())
+
+        # Log certificate metadata
+        logger.info("Certificate subject: %s", cert.subject.rfc4514_string())
+        logger.info("Certificate serial number: %s", hex(cert.serial_number))
+        logger.info(
+            "Certificate valid from %s to %s",
+            cert.not_valid_before_utc,
+            cert.not_valid_after_utc,
+        )
+
+        # Warn about expiry (non-blocking)
+        now = datetime.now(timezone.utc)
+        if cert.not_valid_after_utc < now:
+            logger.warning(
+                "Certificate EXPIRED on %s. Please update the certificate Secret.",
+                cert.not_valid_after_utc,
+            )
+        elif cert.not_valid_before_utc > now:
+            logger.warning(
+                "Certificate is not valid until %s. Check the system clock or the certificate validity period.",
+                cert.not_valid_before_utc,
+            )
+
+    except Exception as e:
+        logger.warning(
+            "Could not validate certificate format at %s: %s. The certificate will still be loaded, but may be invalid.",
+            cert_path,
+            e,
+        )
+
+
+def create_ssl_context(
+    disable_verify: bool,
+    ca_cert_path: str | None,
+    disable_system_cas: bool,
+) -> ssl.SSLContext | bool:
+    """Create an SSL context for an httpx client based on the TLS configuration.
+
+    This function creates an appropriate SSL context based on three possible modes:
+    1. Verification disabled: Returns False (httpx accepts False to disable verification)
+    2. Custom CA only: Creates an SSL context with the custom CA certificate and no system CAs
+    3. System + custom CA: Creates an SSL context with the system CAs plus the custom CA certificate
+
+    Args:
+        disable_verify: If True, SSL verification is disabled (development/testing only).
+            When True, a prominent warning is logged.
+        ca_cert_path: Optional path to a custom CA certificate file in PEM format.
+            If provided, the certificate is loaded into the SSL context.
+        disable_system_cas: If True, system CA certificates are NOT included in the trust store.
+            When False (the default), system CAs are used (safe behavior).
+            When True with ca_cert_path, only the custom CA is trusted.
+
+    Returns:
+        - False if disable_verify=True (httpx special value to disable verification)
+        - ssl.SSLContext configured with the appropriate CA certificates otherwise
+
+    Raises:
+        FileNotFoundError: If ca_cert_path is provided but the file does not exist.
+        ssl.SSLError: If the certificate file is invalid or cannot be loaded.
+
+    Examples:
+        >>> # Disable verification (development only)
+        >>> ctx = create_ssl_context(disable_verify=True, ca_cert_path=None, disable_system_cas=False)
+        >>> assert ctx is False
+
+        >>> # Use only the custom CA certificate
+        >>> ctx = create_ssl_context(
+        ...     disable_verify=False, ca_cert_path="/etc/ssl/certs/custom/ca.crt", disable_system_cas=True
+        ... )
+        >>> assert isinstance(ctx, ssl.SSLContext)
+
+        >>> # Use the system CAs plus the custom CA
+        >>> ctx = create_ssl_context(
+        ...     disable_verify=False, ca_cert_path="/etc/ssl/certs/custom/ca.crt", disable_system_cas=False
+        ... )
+        >>> assert isinstance(ctx, ssl.SSLContext)
+    """
+    # Structured logging for the TLS configuration at startup
+    if disable_verify:
+        logger.warning(
+            "\n"
+            + "=" * 60 + "\n"
+            + "⚠️ SSL VERIFICATION DISABLED ⚠️\n"
+            + "=" * 60 + "\n"
+            + "SSL certificate verification is disabled.\n"
+            + "This should ONLY be used in development/testing.\n"
+            + "Production deployments MUST use proper certificates.\n"
+            + "=" * 60
+        )
+        logger.info("TLS Mode: Disabled (disable_verify=True)")
+        return False  # httpx accepts False to disable verification
+
+    # Determine the TLS mode
+    if ca_cert_path and not disable_system_cas:
+        tls_mode = "Custom CA + System CAs (additive)"
+    elif ca_cert_path:
+        tls_mode = "Custom CA only (no system CAs)"
+    else:
+        tls_mode = "System CAs only (default)"
+
+    logger.info("TLS Mode: %s", tls_mode)
+
+    # Start with the system CAs or an empty context
+    if not disable_system_cas:
+        # Create a default context, which includes the system CAs
+        ctx = ssl.create_default_context()
+        logger.info("Using system CA certificates")
+    else:
+        # Create an empty context without system CAs
+        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+        ctx.check_hostname = True
+        ctx.verify_mode = ssl.CERT_REQUIRED
+        logger.info("System CA certificates disabled (disable_system_cas=True)")
+
+    # Load the custom CA certificate if provided
+    if ca_cert_path:
+        cert_path = Path(ca_cert_path)
+        if not cert_path.exists():
+            raise FileNotFoundError(
+                f"CA certificate file not found: {ca_cert_path}\n"
+                "Please ensure the certificate Secret is mounted correctly.\n"
+                "Check: kubectl get secret <secret-name> -n <namespace>"
+            )
+
+        # Validate the certificate format and log metadata
+        validate_certificate(str(cert_path))
+
+        try:
+            ctx.load_verify_locations(cafile=str(cert_path))
+            logger.info("Custom CA certificate loaded from: %s", ca_cert_path)
+        except ssl.SSLError as e:
+            raise ssl.SSLError(
+                f"Failed to load CA certificate from {ca_cert_path}: {e}\n"
+                "Please verify the certificate is in valid PEM format.\n"
+                f"You can inspect it with: openssl x509 -in {ca_cert_path} -text -noout"
+            ) from e
+
+    return ctx
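Taken together, these helpers wrap httpx client construction: `create_ssl_context` produces the value for httpx's `verify=` argument, and `get_ssl_troubleshooting_message` turns a connection failure into the checklist above. A minimal sketch of that wiring; the base URL and CA path are placeholders rather than values from the package:

```python
import httpx

from kagent.adk.models._ssl import create_ssl_context, get_ssl_troubleshooting_message

CA_PATH = "/etc/ssl/certs/custom/ca.crt"        # placeholder mount path
BASE_URL = "https://llm-gateway.internal:8443"  # placeholder server

# Mode 3 from the docstring: system CAs plus a custom CA.
ctx = create_ssl_context(disable_verify=False, ca_cert_path=CA_PATH, disable_system_cas=False)

try:
    # httpx accepts an ssl.SSLContext (or False) for its `verify` argument.
    with httpx.Client(base_url=BASE_URL, verify=ctx) as client:
        client.get("/v1/models")
except httpx.ConnectError as e:
    # Surface the actionable checklist instead of a bare SSL traceback.
    print(get_ssl_troubleshooting_message(e, ca_cert_path=CA_PATH, server_url="llm-gateway.internal:8443"))
    raise
```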
--- /dev/null
+++ b/kagent/adk/sandbox_code_executer.py
@@ -0,0 +1,77 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import subprocess
+
+from google.adk.agents.invocation_context import InvocationContext
+from google.adk.code_executors.base_code_executor import BaseCodeExecutor
+from google.adk.code_executors.code_execution_utils import CodeExecutionInput, CodeExecutionResult
+from pydantic import Field
+from typing_extensions import override
+
+
+class SandboxedLocalCodeExecutor(BaseCodeExecutor):
+    """A code executor that executes code in a sandbox in the current local context."""
+
+    # Overrides the BaseCodeExecutor attribute: this executor cannot be stateful.
+    stateful: bool = Field(default=False, frozen=True, exclude=True)
+
+    # Overrides the BaseCodeExecutor attribute: this executor cannot
+    # optimize_data_file.
+    optimize_data_file: bool = Field(default=False, frozen=True, exclude=True)
+
+    def __init__(self, **data):
+        """Initializes the SandboxedLocalCodeExecutor."""
+        if "stateful" in data and data["stateful"]:
+            raise ValueError("Cannot set `stateful=True` in SandboxedLocalCodeExecutor.")
+        if "optimize_data_file" in data and data["optimize_data_file"]:
+            raise ValueError("Cannot set `optimize_data_file=True` in SandboxedLocalCodeExecutor.")
+        super().__init__(**data)
+
+    @override
+    def execute_code(
+        self,
+        invocation_context: InvocationContext,
+        code_execution_input: CodeExecutionInput,
+    ) -> CodeExecutionResult:
+        """Executes the given code in a sandboxed local context, using the `srt` command as the sandbox."""
+        output = ""
+        error = ""
+
+        try:
+            # Execute the provided code by piping it to `python -` inside the sandbox.
+            proc = subprocess.run(
+                ["srt", "python", "-"],
+                input=code_execution_input.code,
+                capture_output=True,
+                text=True,
+            )
+            output = proc.stdout or ""
+            error = proc.stderr or ""
+        except FileNotFoundError as e:
+            # srt or python not found
+            output = ""
+            error = f"Execution failed: {e}"
+        except Exception as e:
+            output = ""
+            error = f"Unexpected error during execution: {e}"
+
+        # Collect the final result.
+        return CodeExecutionResult(
+            stdout=output,
+            stderr=error,
+            output_files=[],
+        )
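For context, an executor like this plugs into an ADK agent through its `code_executor` field, so model-generated code runs under `srt` rather than directly on the host. A minimal sketch, assuming the google-adk `Agent` constructor; the agent name, model, and instruction are illustrative only:

```python
from google.adk.agents import Agent

from kagent.adk.sandbox_code_executer import SandboxedLocalCodeExecutor

# Sketch only: name, model, and instruction are illustrative values, not from the package.
agent = Agent(
    name="data_helper",
    model="gemini-2.0-flash",
    instruction="Write and run Python code to answer data questions.",
    code_executor=SandboxedLocalCodeExecutor(),  # generated code is piped to `srt python -`
)
```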
--- /dev/null
+++ b/kagent/adk/skill_fetcher.py
@@ -0,0 +1,103 @@
+from __future__ import annotations
+
+import logging
+import os
+import tarfile
+from typing import Tuple
+
+logger = logging.getLogger(__name__)
+
+
+def _parse_image_ref(image: str) -> Tuple[str, str, str]:
+    """
+    Parse an OCI/Docker image reference into (registry, repository, reference).
+
+    reference is either a tag (default "latest") or a digest (e.g., "sha256:...").
+    Rules (compatible with Docker/OCI name parsing):
+    - If the reference contains a digest ("@"), prefer a tag if one is also present (repo:tag@digest);
+      otherwise keep the digest as the reference.
+    - If there is neither a tag nor a digest, default the reference to "latest".
+    - If the first path component contains a '.' or ':' or equals 'localhost', it is treated as the registry.
+      Otherwise the registry defaults to Docker Hub (docker.io), with the special "library" namespace for
+      single-component names.
+    """
+    name_part = image
+    ref = "latest"
+
+    if "@" in image:
+        # Split off the digest
+        name_part, digest = image.split("@", 1)
+        ref = digest
+
+    # Possibly has a tag: detect a colon after the last slash
+    slash = name_part.rfind("/")
+    colon = name_part.rfind(":")
+    if colon > slash:
+        ref = name_part[colon + 1 :]
+        name_part = name_part[:colon]
+    # else: keep the default "latest"
+
+    # Determine the registry and repo path
+    parts = name_part.split("/")
+    if len(parts) == 1:
+        # Implicit Docker Hub library image
+        registry = "registry-1.docker.io"
+        repo = f"library/{parts[0]}"
+    else:
+        first = parts[0]
+        if first == "localhost" or "." in first or ":" in first:
+            # Explicit registry (may include a port)
+            registry = first
+            repo = "/".join(parts[1:])
+        else:
+            # Docker Hub with a user/org namespace
+            registry = "docker.io"
+            repo = "/".join(parts)
+
+    return registry, repo, ref
+
+
+def fetch_using_crane_to_dir(image: str, destination_folder: str, insecure: bool = False) -> None:
+    """Fetch a skill using crane and extract it to destination_folder."""
+    import subprocess
+
+    tar_path = os.path.join(destination_folder, "skill.tar")
+    os.makedirs(destination_folder, exist_ok=True)
+    command = ["crane", "export", image, tar_path]
+    if insecure:
+        command.insert(1, "--insecure")
+    # Use crane to pull the image as a tarball
+    subprocess.run(
+        command,
+        check=True,
+    )
+
+    # Extract the tarball
+    with tarfile.open(tar_path, "r") as tar:
+        tar.extractall(path=destination_folder, filter=tarfile.data_filter)
+
+    # Remove the tarball
+    os.remove(tar_path)
+
+
+def fetch_skill(skill_image: str, destination_folder: str, insecure: bool = False) -> None:
+    """
+    Fetch a skill packaged as an OCI/Docker image and write its files to destination_folder.
+
+    To build a compatible skill image from a folder (containing SKILL.md), use a simple Dockerfile:
+        FROM scratch
+        COPY . /
+
+    Args:
+        skill_image: The image reference (e.g., "alpine:latest", "ghcr.io/org/skill:tag", or with a digest).
+        destination_folder: The folder where the skill files should be written.
+        insecure: If True, pass `--insecure` to crane to allow insecure registry connections.
+    """
+    registry, repo, ref = _parse_image_ref(skill_image)
+
+    # The skill name is the last part of the repo
+    repo_parts = repo.split("/")
+    skill_name = repo_parts[-1]
+    logger.info(
+        f"Fetching skill {skill_name} from image {skill_image} (registry: {registry}, repo: {repo}, ref: {ref})"
+    )
+
+    fetch_using_crane_to_dir(skill_image, os.path.join(destination_folder, skill_name), insecure)
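The parsing rules are easiest to verify against concrete references; the expected tuples below follow directly from the branches of `_parse_image_ref`:

```python
from kagent.adk.skill_fetcher import _parse_image_ref

# Single-component name: implicit Docker Hub "library" namespace.
assert _parse_image_ref("alpine") == ("registry-1.docker.io", "library/alpine", "latest")

# Explicit registry (first component contains a dot) plus a tag.
assert _parse_image_ref("ghcr.io/org/skill:v1") == ("ghcr.io", "org/skill", "v1")

# Docker Hub user/org namespace with a digest and no tag.
assert _parse_image_ref("org/skill@sha256:abc123") == ("docker.io", "org/skill", "sha256:abc123")

# Both a tag and a digest present: the tag wins as the reference.
assert _parse_image_ref("ghcr.io/org/skill:v1@sha256:abc123") == ("ghcr.io", "org/skill", "v1")
```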
--- /dev/null
+++ b/kagent/adk/tools/README.md
@@ -0,0 +1,217 @@
+# ADK Skills
+
+Filesystem-based skills with progressive disclosure and a two-tool architecture for domain expertise.
+
+---
+
+## Quick Start
+
+### Recommended: Plugin-Based (Multi-Agent Apps)
+
+```python
+from kagent.adk.skills import SkillsPlugin
+
+# The plugin automatically initializes sessions and registers all skills tools
+app = App(
+    root_agent=agent,
+    plugins=[SkillsPlugin(skills_directory="./skills")]
+)
+```
+
+**Benefits:**
+
+- ✅ Session paths initialized before any tool runs
+- ✅ Automatic tool registration on all agents
+- ✅ Handles custom skills directories correctly
+- ✅ No tool call order dependencies
+
+### Alternative: Direct Tool Usage
+
+```python
+from kagent.adk.tools import BashTool, EditFileTool, ReadFileTool, SkillsTool, WriteFileTool
+
+agent = Agent(
+    tools=[
+        SkillsTool(skills_directory="./skills"),
+        BashTool(skills_directory="./skills"),
+        ReadFileTool(),
+        WriteFileTool(),
+        EditFileTool(),
+    ]
+)
+```
+
+**Note:** Without SkillsPlugin, sessions auto-initialize with the `/skills` directory. For custom skills paths, use the plugin.
+
+---
+
+## Session Initialization
+
+The skills system uses a **plugin-based initialization pattern** to ensure session working directories are set up before any tools run.
+
+### How It Works
+
+```text
+App Starts
+    ↓
+SkillsPlugin initialized with skills_directory
+    ↓
+First Agent Turn
+    ↓
+before_agent_callback() hook fires
+    ↓
+Session path initialized with skills symlink
+    ↓
+Tools registered on agent
+    ↓
+Tools execute (session already initialized)
+```
+
+**Key Points:**
+
+- `SkillsPlugin.before_agent_callback()` fires **before any tool invocation**
+- Creates `/tmp/kagent/{session_id}/` with a `skills/` symlink
+- All tools use `get_session_path(session_id)`, which returns the cached path (see the sketch after this README)
+- **No tool call order dependencies** - the session is always ready
+
+**Without Plugin:**
+
+- Tools auto-initialize the session with the default `/skills` on first call
+- Works fine if skills are at the `/skills` location
+- For custom paths, use SkillsPlugin
+
+---
+
+## Architecture
+
+### Skill Structure
+
+```text
+skills/
+├── data-analysis/
+│   ├── SKILL.md        # Metadata (YAML frontmatter) + instructions
+│   └── scripts/        # Python scripts, configs, etc.
+│       └── analyze.py
+```
+
+**SKILL.md Example:**
+
+```markdown
+---
+name: data-analysis
+description: Analyze CSV/Excel files
+---
+
+# Data Analysis
+
+...instructions for the agent...
+```
+
+### Tool Workflow
+
+**Three Phases:**
+
+1. **Discovery** - Agent sees available skills in the SkillsTool description
+2. **Loading** - Agent calls `skills(command='data-analysis')` → gets the full SKILL.md
+3. **Execution** - Agent uses BashTool + file tools to run scripts per the instructions
+
+| Tool           | Purpose                      | Example                                                |
+| -------------- | ---------------------------- | ------------------------------------------------------ |
+| **SkillsTool** | Load skill instructions      | `skills(command='data-analysis')`                      |
+| **BashTool**   | Execute commands             | `bash("cd skills/data-analysis && python script.py")`  |
+| **ReadFile**   | Read files with line numbers | `read_file("skills/data-analysis/config.json")`        |
+| **WriteFile**  | Create/overwrite files       | `write_file("outputs/report.pdf", data)`               |
+| **EditFile**   | Precise string replacements  | `edit_file("script.py", old="x", new="y")`             |
+
+### Working Directory Structure
+
+Each session gets an isolated working directory with symlinked skills:
+
+```text
+/tmp/kagent/{session_id}/
+├── skills/   → symlink to /skills (read-only, shared across sessions)
+├── uploads/  → staged user files (writable)
+├── outputs/  → generated files for download (writable)
+└── *.py      → temporary scripts (writable)
+```
+
+**Path Resolution:**
+
+- Relative paths resolve from the working directory: `skills/data-analysis/script.py`
+- Absolute paths work too: `/tmp/kagent/{session_id}/outputs/report.pdf`
+- The skills symlink enables natural relative references while maintaining security
+
+---
+
+## Artifact Handling
+
+User uploads and downloads are managed through the artifact tools:
+
+```python
+# 1. Stage an uploaded file from the artifact service
+stage_artifacts(artifact_names=["sales_data.csv"])
+# → Writes to: uploads/sales_data.csv
+
+# 2. Agent processes the file
+bash("python skills/data-analysis/scripts/analyze.py uploads/sales_data.csv")
+# → Script writes: outputs/report.pdf
+
+# 3. Return the generated file
+return_artifacts(file_paths=["outputs/report.pdf"])
+# → Saves to the artifact service for user download
+```
+
+**Flow:** User Upload → Artifact Service → `uploads/` → Processing → `outputs/` → Artifact Service → User Download
+
+---
+
+## Security
+
+**Read-only skills directory:**
+
+- Skills at `/skills` are read-only (enforced by the sandbox)
+- The symlink at `skills/` inherits the read-only permissions
+- Agents cannot modify skill code or instructions
+
+**File tools:**
+
+- Path traversal protection (no `..`)
+- Session isolation (each session has a separate working directory)
+- File size limits (100 MB max)
+
+**Bash tool:**
+
+- Sandboxed execution via the Anthropic Sandbox Runtime
+- Command timeouts (30s default, 120s for pip install)
+- Working directory restrictions
+
+---
+
+## Example Agent Flow
+
+```python
+# User asks: "Analyze my sales data"
+
+# 1. Agent discovers available skills
+# → SkillsTool description lists: data-analysis, pdf-processing, etc.
+
+# 2. Agent loads the skill instructions
+agent: skills(command='data-analysis')
+# → Returns the full SKILL.md with detailed instructions
+
+# 3. Agent stages the uploaded file
+agent: stage_artifacts(artifact_names=["sales_data.csv"])
+# → File available at: uploads/sales_data.csv
+
+# 4. Agent reads the skill script to understand it
+agent: read_file("skills/data-analysis/scripts/analyze.py")
+
+# 5. Agent executes the analysis
+agent: bash("cd skills/data-analysis && python scripts/analyze.py ../../uploads/sales_data.csv")
+# → Script generates: outputs/analysis_report.pdf
+
+# 6. Agent returns the result
+agent: return_artifacts(file_paths=["outputs/analysis_report.pdf"])
+# → User can download analysis_report.pdf
+```
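The Key Points in the README above lean on `get_session_path(session_id)` returning a cached, pre-built working directory. The packaged implementation (see `artifacts/session_path.py` and `tools/skills_plugin.py` in the file list) is not reproduced in this view, so the following is a hypothetical sketch of the behavior the README describes; only the helper name and directory layout come from the README, everything else is illustrative:

```python
import os
import tempfile

# Hypothetical sketch of the session initialization the README describes;
# the packaged implementation may differ.
_session_paths: dict[str, str] = {}

def get_session_path(session_id: str, skills_directory: str = "/skills") -> str:
    """Return the per-session working directory, creating and caching it on first use."""
    if session_id not in _session_paths:
        workdir = os.path.join(tempfile.gettempdir(), "kagent", session_id)
        os.makedirs(os.path.join(workdir, "uploads"), exist_ok=True)   # staged user files
        os.makedirs(os.path.join(workdir, "outputs"), exist_ok=True)   # files returned to the user
        link = os.path.join(workdir, "skills")
        if not os.path.islink(link):
            os.symlink(skills_directory, link)  # read-only skills, shared across sessions
        _session_paths[session_id] = workdir
    return _session_paths[session_id]
```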
--- /dev/null
+++ b/kagent/adk/tools/__init__.py
@@ -0,0 +1,15 @@
+from .bash_tool import BashTool
+from .file_tools import EditFileTool, ReadFileTool, WriteFileTool
+from .skill_tool import SkillsTool
+from .skills_plugin import add_skills_tool_to_agent
+from .skills_toolset import SkillsToolset
+
+__all__ = [
+    "SkillsTool",
+    "SkillsToolset",
+    "BashTool",
+    "EditFileTool",
+    "ReadFileTool",
+    "WriteFileTool",
+    "add_skills_tool_to_agent",
+]