kagent-adk 0.7.1__py3-none-any.whl → 0.7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kagent-adk might be problematic. Click here for more details.
- kagent/adk/_a2a.py +9 -4
- kagent/adk/_agent_executor.py +1 -1
- kagent/adk/cli.py +1 -0
- kagent/adk/skills/README.md +217 -0
- kagent/adk/skills/__init__.py +29 -0
- kagent/adk/skills/bash_tool.py +244 -0
- kagent/adk/skills/skill_system_prompt.py +165 -0
- kagent/adk/skills/skill_tool.py +202 -0
- kagent/adk/skills/skills_plugin.py +90 -0
- kagent/adk/skills/skills_toolset.py +54 -0
- kagent/adk/skills/stage_artifacts_tool.py +164 -0
- {kagent_adk-0.7.1.dist-info → kagent_adk-0.7.2.dist-info}/METADATA +1 -1
- kagent_adk-0.7.2.dist-info/RECORD +26 -0
- kagent_adk-0.7.1.dist-info/RECORD +0 -18
- {kagent_adk-0.7.1.dist-info → kagent_adk-0.7.2.dist-info}/WHEEL +0 -0
- {kagent_adk-0.7.1.dist-info → kagent_adk-0.7.2.dist-info}/entry_points.txt +0 -0
kagent/adk/_a2a.py
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
import faulthandler
|
|
3
3
|
import logging
|
|
4
4
|
import os
|
|
5
|
-
from typing import Callable
|
|
5
|
+
from typing import Callable, List
|
|
6
6
|
|
|
7
7
|
import httpx
|
|
8
8
|
from a2a.server.apps import A2AFastAPIApplication
|
|
@@ -14,8 +14,11 @@ from fastapi import FastAPI, Request
|
|
|
14
14
|
from fastapi.responses import PlainTextResponse
|
|
15
15
|
from google.adk.agents import BaseAgent
|
|
16
16
|
from google.adk.apps import App
|
|
17
|
+
from google.adk.plugins import BasePlugin
|
|
17
18
|
from google.adk.runners import Runner
|
|
18
19
|
from google.adk.sessions import InMemorySessionService
|
|
20
|
+
from google.adk.artifacts import InMemoryArtifactService
|
|
21
|
+
|
|
19
22
|
from google.genai import types
|
|
20
23
|
|
|
21
24
|
from kagent.core.a2a import KAgentRequestContextBuilder, KAgentTaskStore
|
|
@@ -64,11 +67,13 @@ class KAgentApp:
|
|
|
64
67
|
agent_card: AgentCard,
|
|
65
68
|
kagent_url: str,
|
|
66
69
|
app_name: str,
|
|
70
|
+
plugins: List[BasePlugin] = None,
|
|
67
71
|
):
|
|
68
72
|
self.root_agent = root_agent
|
|
69
73
|
self.kagent_url = kagent_url
|
|
70
74
|
self.app_name = app_name
|
|
71
75
|
self.agent_card = agent_card
|
|
76
|
+
self.plugins = plugins if plugins is not None else []
|
|
72
77
|
|
|
73
78
|
def build(self) -> FastAPI:
|
|
74
79
|
token_service = KAgentTokenService(self.app_name)
|
|
@@ -77,17 +82,17 @@ class KAgentApp:
|
|
|
77
82
|
)
|
|
78
83
|
session_service = KAgentSessionService(http_client)
|
|
79
84
|
|
|
80
|
-
plugins = []
|
|
81
85
|
if sts_well_known_uri:
|
|
82
86
|
sts_integration = ADKSTSIntegration(sts_well_known_uri)
|
|
83
|
-
plugins.append(ADKTokenPropagationPlugin(sts_integration))
|
|
87
|
+
self.plugins.append(ADKTokenPropagationPlugin(sts_integration))
|
|
84
88
|
|
|
85
|
-
adk_app = App(name=self.app_name, root_agent=self.root_agent, plugins=plugins)
|
|
89
|
+
adk_app = App(name=self.app_name, root_agent=self.root_agent, plugins=self.plugins)
|
|
86
90
|
|
|
87
91
|
def create_runner() -> Runner:
|
|
88
92
|
return Runner(
|
|
89
93
|
app=adk_app,
|
|
90
94
|
session_service=session_service,
|
|
95
|
+
artifact_service=InMemoryArtifactService(),
|
|
91
96
|
)
|
|
92
97
|
|
|
93
98
|
agent_executor = A2aAgentExecutor(
|
kagent/adk/_agent_executor.py
CHANGED
|
@@ -33,7 +33,7 @@ from kagent.core.a2a import TaskResultAggregator, get_kagent_metadata_key
|
|
|
33
33
|
from .converters.event_converter import convert_event_to_a2a_events
|
|
34
34
|
from .converters.request_converter import convert_a2a_request_to_adk_run_args
|
|
35
35
|
|
|
36
|
-
logger = logging.getLogger("
|
|
36
|
+
logger = logging.getLogger("kagent_adk." + __name__)
|
|
37
37
|
|
|
38
38
|
|
|
39
39
|
class A2aAgentExecutorConfig(BaseModel):
|
kagent/adk/cli.py
CHANGED
|
@@ -14,6 +14,7 @@ from kagent.core import KAgentConfig, configure_tracing
|
|
|
14
14
|
from . import AgentConfig, KAgentApp
|
|
15
15
|
|
|
16
16
|
logger = logging.getLogger(__name__)
|
|
17
|
+
logging.getLogger("google_adk.google.adk.tools.base_authenticated_tool").setLevel(logging.ERROR)
|
|
17
18
|
|
|
18
19
|
app = typer.Typer()
|
|
19
20
|
|
|
@@ -0,0 +1,217 @@
|
|
|
1
|
+
# ADK Skills
|
|
2
|
+
|
|
3
|
+
Filesystem-based skills with progressive disclosure and two-tool architecture.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Overview
|
|
8
|
+
|
|
9
|
+
Skills enable agents to specialize in domain expertise without bloating the main context. The **two-tool pattern** separates concerns:
|
|
10
|
+
|
|
11
|
+
- **SkillsTool** - Loads skill instructions
|
|
12
|
+
- **BashTool** - Executes commands
|
|
13
|
+
- **Semantic clarity** leads to better LLM reasoning
|
|
14
|
+
|
|
15
|
+
### Skill Structure
|
|
16
|
+
|
|
17
|
+
```text
|
|
18
|
+
skills/
|
|
19
|
+
├── data-analysis/
|
|
20
|
+
│ ├── SKILL.md # Metadata + instructions (YAML frontmatter)
|
|
21
|
+
│ └── scripts/
|
|
22
|
+
│ └── analyze.py
|
|
23
|
+
└── pdf-processing/
|
|
24
|
+
├── SKILL.md
|
|
25
|
+
└── scripts/
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
**SKILL.md:**
|
|
29
|
+
|
|
30
|
+
```markdown
|
|
31
|
+
---
|
|
32
|
+
name: data-analysis
|
|
33
|
+
description: Analyze CSV/Excel files
|
|
34
|
+
---
|
|
35
|
+
|
|
36
|
+
# Data Analysis
|
|
37
|
+
|
|
38
|
+
...instructions...
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
---
|
|
42
|
+
|
|
43
|
+
## Quick Start
|
|
44
|
+
|
|
45
|
+
**Two-Tool Pattern (Recommended):**
|
|
46
|
+
|
|
47
|
+
```python
|
|
48
|
+
from kagent.adk.skills import SkillsTool, BashTool, StageArtifactsTool
|
|
49
|
+
|
|
50
|
+
agent = Agent(
|
|
51
|
+
tools=[
|
|
52
|
+
SkillsTool(skills_directory="./skills"),
|
|
53
|
+
BashTool(skills_directory="./skills"),
|
|
54
|
+
StageArtifactsTool(skills_directory="./skills"),
|
|
55
|
+
]
|
|
56
|
+
)
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
**With Plugin (Multi-Agent Apps):**
|
|
60
|
+
|
|
61
|
+
```python
|
|
62
|
+
from kagent.adk.skills import SkillsPlugin
|
|
63
|
+
|
|
64
|
+
app = App(root_agent=agent, plugins=[SkillsPlugin(skills_directory="./skills")])
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
**Legacy Single-Tool (Backward Compat):**
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
from kagent.adk.skills import SkillsShellTool
|
|
71
|
+
|
|
72
|
+
agent = Agent(tools=[SkillsShellTool(skills_directory="./skills")])
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
---
|
|
76
|
+
|
|
77
|
+
## How It Works
|
|
78
|
+
|
|
79
|
+
### Two-Tool Workflow
|
|
80
|
+
|
|
81
|
+
```mermaid
|
|
82
|
+
sequenceDiagram
|
|
83
|
+
participant A as Agent
|
|
84
|
+
participant S as SkillsTool
|
|
85
|
+
participant B as BashTool
|
|
86
|
+
|
|
87
|
+
A->>S: skills(command='data-analysis')
|
|
88
|
+
S-->>A: Full SKILL.md + base path
|
|
89
|
+
A->>B: bash("cd skills/data-analysis && python scripts/analyze.py file.csv")
|
|
90
|
+
B-->>A: Results
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
**Three Phases:**
|
|
94
|
+
|
|
95
|
+
1. **Discovery** - Agent sees available skills in tool description
|
|
96
|
+
2. **Loading** - Invoke skill with `command='skill-name'` → returns full SKILL.md
|
|
97
|
+
3. **Execution** - Use BashTool with instructions from SKILL.md
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## Architecture
|
|
102
|
+
|
|
103
|
+
```mermaid
|
|
104
|
+
graph LR
|
|
105
|
+
Agent[Agent] -->|Load<br/>skill details| SkillsTool["SkillsTool<br/>(Discovery)"]
|
|
106
|
+
Agent -->|Execute<br/>commands| BashTool["BashTool<br/>(Execution)"]
|
|
107
|
+
SkillsTool -->|Embedded in<br/>description| Skills["Available<br/>Skills List"]
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
| Tool | Purpose | Input | Output |
|
|
111
|
+
| ---------------------- | ------------------- | ---------------------- | ------------------------- |
|
|
112
|
+
| **SkillsTool** | Load skill metadata | `command='skill-name'` | Full SKILL.md + base path |
|
|
113
|
+
| **BashTool** | Execute safely | Command string | Script output |
|
|
114
|
+
| **StageArtifactsTool** | Stage uploads | Artifact names | File paths in `uploads/` |
|
|
115
|
+
|
|
116
|
+
---
|
|
117
|
+
|
|
118
|
+
## File Handling
|
|
119
|
+
|
|
120
|
+
User uploads → Artifact → Stage → Execute:
|
|
121
|
+
|
|
122
|
+
```python
|
|
123
|
+
# 1. Stage uploaded file
|
|
124
|
+
stage_artifacts(artifact_names=["artifact_123"])
|
|
125
|
+
|
|
126
|
+
# 2. Use in skill script
|
|
127
|
+
bash("cd skills/data-analysis && python scripts/analyze.py uploads/artifact_123")
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
---
|
|
131
|
+
|
|
132
|
+
## Security
|
|
133
|
+
|
|
134
|
+
**SkillsTool:**
|
|
135
|
+
|
|
136
|
+
- ✅ Read-only (no execution)
|
|
137
|
+
- ✅ Validates skill existence
|
|
138
|
+
- ✅ Caches results
|
|
139
|
+
|
|
140
|
+
**BashTool:**
|
|
141
|
+
|
|
142
|
+
- ✅ Whitelisted commands only (`ls`, `cat`, `python`, `pip`, etc.)
|
|
143
|
+
- ✅ No destructive ops (`rm`, `mv`, `chmod` blocked)
|
|
144
|
+
- ✅ Directory restrictions (no `..`)
|
|
145
|
+
- ✅ 30-second timeout
|
|
146
|
+
- ✅ Subprocess isolation
|
|
147
|
+
|
|
148
|
+
---
|
|
149
|
+
|
|
150
|
+
## Components
|
|
151
|
+
|
|
152
|
+
| File | Purpose |
|
|
153
|
+
| ------------------------- | ---------------------------- |
|
|
154
|
+
| `skills_invoke_tool.py` | Discovery & loading |
|
|
155
|
+
| `bash_tool.py` | Command execution |
|
|
156
|
+
| `stage_artifacts_tool.py` | File staging |
|
|
157
|
+
| `skills_plugin.py` | Auto-registration (optional) |
|
|
158
|
+
| `skills_shell_tool.py` | Legacy all-in-one |
|
|
159
|
+
|
|
160
|
+
---
|
|
161
|
+
|
|
162
|
+
## Examples
|
|
163
|
+
|
|
164
|
+
### Example 1: Data Analysis
|
|
165
|
+
|
|
166
|
+
```python
|
|
167
|
+
# Agent loads skill
|
|
168
|
+
agent.invoke(tools=[
|
|
169
|
+
SkillsTool(skills_directory="./skills"),
|
|
170
|
+
BashTool(skills_directory="./skills"),
|
|
171
|
+
], prompt="Analyze this CSV file")
|
|
172
|
+
|
|
173
|
+
# Agent flow:
|
|
174
|
+
# 1. Calls: skills(command='data-analysis')
|
|
175
|
+
# 2. Gets: Full SKILL.md with instructions
|
|
176
|
+
# 3. Calls: bash("cd skills/data-analysis && python scripts/analyze.py file.csv")
|
|
177
|
+
# 4. Returns: Analysis results
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
### Example 2: Multi-Agent App
|
|
181
|
+
|
|
182
|
+
```python
|
|
183
|
+
# Register skills on all agents
|
|
184
|
+
app = App(
|
|
185
|
+
root_agent=agent,
|
|
186
|
+
plugins=[SkillsPlugin(skills_directory="./skills")]
|
|
187
|
+
)
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
---
|
|
191
|
+
|
|
192
|
+
## Comparison with Claude
|
|
193
|
+
|
|
194
|
+
ADK follows Claude's two-tool pattern exactly:
|
|
195
|
+
|
|
196
|
+
| Aspect | Claude | ADK |
|
|
197
|
+
| -------------- | ------------------- | ---------------------- |
|
|
198
|
+
| Discovery tool | Skills tool | SkillsTool ✅ |
|
|
199
|
+
| Execution tool | Bash tool | BashTool ✅ |
|
|
200
|
+
| Parameter | `command` | `command` ✅ |
|
|
201
|
+
| Pattern | Two-tool separation | Two-tool separation ✅ |
|
|
202
|
+
|
|
203
|
+
---
|
|
204
|
+
|
|
205
|
+
## What Changed
|
|
206
|
+
|
|
207
|
+
**Before:** Single `SkillsShellTool` (all-in-one)
|
|
208
|
+
**Now:** Two-tool architecture (discovery + execution)
|
|
209
|
+
|
|
210
|
+
| Feature | Before | After |
|
|
211
|
+
| ---------------------- | --------- | ----------------- |
|
|
212
|
+
| Semantic clarity | Mixed | Separated ✅ |
|
|
213
|
+
| LLM reasoning | Implicit | Explicit ✅ |
|
|
214
|
+
| Progressive disclosure | Guideline | Enforced ✅ |
|
|
215
|
+
| Industry alignment | Custom | Claude pattern ✅ |
|
|
216
|
+
|
|
217
|
+
All previous code still works (backward compatible via `SkillsShellTool`).
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# Copyright 2025 Google LLC
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from .bash_tool import BashTool
|
|
16
|
+
from .skill_system_prompt import generate_shell_skills_system_prompt
|
|
17
|
+
from .skill_tool import SkillsTool
|
|
18
|
+
from .skills_plugin import SkillsPlugin
|
|
19
|
+
from .skills_toolset import SkillsToolset
|
|
20
|
+
from .stage_artifacts_tool import StageArtifactsTool
|
|
21
|
+
|
|
22
|
+
__all__ = [
|
|
23
|
+
"BashTool",
|
|
24
|
+
"SkillsTool",
|
|
25
|
+
"SkillsPlugin",
|
|
26
|
+
"SkillsToolset",
|
|
27
|
+
"StageArtifactsTool",
|
|
28
|
+
"generate_shell_skills_system_prompt",
|
|
29
|
+
]
|
|
@@ -0,0 +1,244 @@
|
|
|
1
|
+
"""Simplified bash tool for executing shell commands in skills context."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import logging
|
|
7
|
+
import os
|
|
8
|
+
import shlex
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any, Dict, List, Set, Union
|
|
11
|
+
|
|
12
|
+
from google.adk.tools import BaseTool, ToolContext
|
|
13
|
+
from google.genai import types
|
|
14
|
+
|
|
15
|
+
from .stage_artifacts_tool import get_session_staging_path
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger("kagent_adk." + __name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class BashTool(BaseTool):
|
|
21
|
+
"""Execute bash commands safely in the skills environment.
|
|
22
|
+
|
|
23
|
+
This tool is for terminal operations and script execution. Use it after loading
|
|
24
|
+
skill instructions with the skills tool.
|
|
25
|
+
"""
|
|
26
|
+
|
|
27
|
+
DANGEROUS_COMMANDS: Set[str] = {
|
|
28
|
+
"rm",
|
|
29
|
+
"rmdir",
|
|
30
|
+
"mv",
|
|
31
|
+
"cp",
|
|
32
|
+
"chmod",
|
|
33
|
+
"chown",
|
|
34
|
+
"sudo",
|
|
35
|
+
"su",
|
|
36
|
+
"kill",
|
|
37
|
+
"reboot",
|
|
38
|
+
"shutdown",
|
|
39
|
+
"dd",
|
|
40
|
+
"mount",
|
|
41
|
+
"umount",
|
|
42
|
+
"alias",
|
|
43
|
+
"export",
|
|
44
|
+
"source",
|
|
45
|
+
".",
|
|
46
|
+
"eval",
|
|
47
|
+
"exec",
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
def __init__(self, skills_directory: str | Path):
|
|
51
|
+
super().__init__(
|
|
52
|
+
name="bash",
|
|
53
|
+
description=(
|
|
54
|
+
"Execute bash commands in the skills environment.\n\n"
|
|
55
|
+
"Use this tool to:\n"
|
|
56
|
+
"- Execute Python scripts from files (e.g., 'python scripts/script.py')\n"
|
|
57
|
+
"- Install dependencies (e.g., 'pip install -r requirements.txt')\n"
|
|
58
|
+
"- Navigate and inspect files (e.g., 'ls', 'cat file.txt')\n"
|
|
59
|
+
"- Run shell commands with relative or absolute paths\n\n"
|
|
60
|
+
"Important:\n"
|
|
61
|
+
"- Always load skill instructions first using the skills tool\n"
|
|
62
|
+
"- Execute scripts from within their skill directory using 'cd skills/SKILL_NAME && ...'\n"
|
|
63
|
+
"- For Python code execution: ALWAYS write code to a file first, then run it with 'python file.py'\n"
|
|
64
|
+
"- Never use 'python -c \"code\"' - write to file first instead\n"
|
|
65
|
+
"- Quote paths with spaces (e.g., 'cd \"path with spaces\"')\n"
|
|
66
|
+
"- pip install commands may take longer (120s timeout)\n"
|
|
67
|
+
"- Python scripts have 60s timeout, other commands 30s\n\n"
|
|
68
|
+
"Security:\n"
|
|
69
|
+
"- Only whitelisted commands allowed (ls, cat, python, pip, etc.)\n"
|
|
70
|
+
"- No destructive operations (rm, mv, chown, etc. blocked)\n"
|
|
71
|
+
"- The sandbox environment provides additional isolation"
|
|
72
|
+
),
|
|
73
|
+
)
|
|
74
|
+
self.skills_directory = Path(skills_directory).resolve()
|
|
75
|
+
if not self.skills_directory.exists():
|
|
76
|
+
raise ValueError(f"Skills directory does not exist: {self.skills_directory}")
|
|
77
|
+
|
|
78
|
+
def _get_declaration(self) -> types.FunctionDeclaration:
|
|
79
|
+
return types.FunctionDeclaration(
|
|
80
|
+
name=self.name,
|
|
81
|
+
description=self.description,
|
|
82
|
+
parameters=types.Schema(
|
|
83
|
+
type=types.Type.OBJECT,
|
|
84
|
+
properties={
|
|
85
|
+
"command": types.Schema(
|
|
86
|
+
type=types.Type.STRING,
|
|
87
|
+
description="Bash command to execute. Use && to chain commands.",
|
|
88
|
+
),
|
|
89
|
+
"description": types.Schema(
|
|
90
|
+
type=types.Type.STRING,
|
|
91
|
+
description="Clear, concise description of what this command does (5-10 words)",
|
|
92
|
+
),
|
|
93
|
+
},
|
|
94
|
+
required=["command"],
|
|
95
|
+
),
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
async def run_async(self, *, args: Dict[str, Any], tool_context: ToolContext) -> str:
|
|
99
|
+
"""Execute a bash command safely."""
|
|
100
|
+
command = args.get("command", "").strip()
|
|
101
|
+
description = args.get("description", "")
|
|
102
|
+
|
|
103
|
+
if not command:
|
|
104
|
+
return "Error: No command provided"
|
|
105
|
+
|
|
106
|
+
if description:
|
|
107
|
+
logger.info(f"Executing: {description}")
|
|
108
|
+
|
|
109
|
+
try:
|
|
110
|
+
parsed_commands = self._parse_and_validate_command(command)
|
|
111
|
+
result = await self._execute_command_safely(parsed_commands, tool_context)
|
|
112
|
+
logger.info(f"Executed bash command: {command}")
|
|
113
|
+
return result
|
|
114
|
+
except Exception as e:
|
|
115
|
+
error_msg = f"Error executing command '{command}': {e}"
|
|
116
|
+
logger.error(error_msg)
|
|
117
|
+
return error_msg
|
|
118
|
+
|
|
119
|
+
def _parse_and_validate_command(self, command: str) -> List[List[str]]:
|
|
120
|
+
"""Parse and validate command for security."""
|
|
121
|
+
if "&&" in command:
|
|
122
|
+
parts = [part.strip() for part in command.split("&&")]
|
|
123
|
+
else:
|
|
124
|
+
parts = [command]
|
|
125
|
+
|
|
126
|
+
parsed_parts = []
|
|
127
|
+
for part in parts:
|
|
128
|
+
parsed_part = shlex.split(part)
|
|
129
|
+
validation_error = self._validate_command_part(parsed_part)
|
|
130
|
+
if validation_error:
|
|
131
|
+
raise ValueError(validation_error)
|
|
132
|
+
parsed_parts.append(parsed_part)
|
|
133
|
+
return parsed_parts
|
|
134
|
+
|
|
135
|
+
def _validate_command_part(self, command_parts: List[str]) -> Union[str, None]:
|
|
136
|
+
"""Validate a single command part for security."""
|
|
137
|
+
if not command_parts:
|
|
138
|
+
return "Empty command"
|
|
139
|
+
|
|
140
|
+
base_command = command_parts[0]
|
|
141
|
+
|
|
142
|
+
if base_command in self.DANGEROUS_COMMANDS:
|
|
143
|
+
return f"Command '{base_command}' is not allowed for security reasons."
|
|
144
|
+
|
|
145
|
+
return None
|
|
146
|
+
|
|
147
|
+
async def _execute_command_safely(self, parsed_commands: List[List[str]], tool_context: ToolContext) -> str:
|
|
148
|
+
"""Execute parsed commands in the sandboxed environment."""
|
|
149
|
+
staging_root = get_session_staging_path(
|
|
150
|
+
session_id=tool_context.session.id,
|
|
151
|
+
app_name=tool_context._invocation_context.app_name,
|
|
152
|
+
skills_directory=self.skills_directory,
|
|
153
|
+
)
|
|
154
|
+
original_cwd = os.getcwd()
|
|
155
|
+
output_parts = []
|
|
156
|
+
|
|
157
|
+
try:
|
|
158
|
+
os.chdir(staging_root)
|
|
159
|
+
|
|
160
|
+
for i, command_parts in enumerate(parsed_commands):
|
|
161
|
+
if i > 0:
|
|
162
|
+
output_parts.append(f"\n--- Command {i + 1} ---")
|
|
163
|
+
|
|
164
|
+
if command_parts[0] == "cd":
|
|
165
|
+
if len(command_parts) > 1:
|
|
166
|
+
target_path = command_parts[1]
|
|
167
|
+
try:
|
|
168
|
+
# Resolve the path relative to current directory
|
|
169
|
+
target_abs = (Path(os.getcwd()) / target_path).resolve()
|
|
170
|
+
os.chdir(target_abs)
|
|
171
|
+
current_cwd = os.getcwd()
|
|
172
|
+
output_parts.append(f"Changed directory to {target_path}")
|
|
173
|
+
logger.info(f"Changed to {target_path}. Current cwd: {current_cwd}")
|
|
174
|
+
except (OSError, RuntimeError) as e:
|
|
175
|
+
output_parts.append(f"Error changing directory: {e}")
|
|
176
|
+
logger.error(f"Failed to cd to {target_path}: {e}")
|
|
177
|
+
continue
|
|
178
|
+
|
|
179
|
+
# Determine timeout based on command type
|
|
180
|
+
timeout = self._get_command_timeout(command_parts)
|
|
181
|
+
current_cwd = os.getcwd()
|
|
182
|
+
|
|
183
|
+
try:
|
|
184
|
+
process = await asyncio.create_subprocess_exec(
|
|
185
|
+
*command_parts,
|
|
186
|
+
stdout=asyncio.subprocess.PIPE,
|
|
187
|
+
stderr=asyncio.subprocess.PIPE,
|
|
188
|
+
cwd=current_cwd,
|
|
189
|
+
)
|
|
190
|
+
try:
|
|
191
|
+
stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=timeout)
|
|
192
|
+
except asyncio.TimeoutError:
|
|
193
|
+
process.kill()
|
|
194
|
+
await process.wait()
|
|
195
|
+
error_msg = f"Command '{' '.join(command_parts)}' timed out after {timeout}s"
|
|
196
|
+
output_parts.append(f"Error: {error_msg}")
|
|
197
|
+
logger.error(error_msg)
|
|
198
|
+
break
|
|
199
|
+
|
|
200
|
+
stdout_str = stdout.decode("utf-8", errors="replace") if stdout else ""
|
|
201
|
+
stderr_str = stderr.decode("utf-8", errors="replace") if stderr else ""
|
|
202
|
+
|
|
203
|
+
if process.returncode != 0:
|
|
204
|
+
output = stderr_str or stdout_str
|
|
205
|
+
error_output = f"Command failed with exit code {process.returncode}:\n{output}"
|
|
206
|
+
output_parts.append(error_output)
|
|
207
|
+
# Don't break on pip errors, continue to allow retry
|
|
208
|
+
if command_parts[0] not in ("pip", "pip3"):
|
|
209
|
+
break
|
|
210
|
+
else:
|
|
211
|
+
# Combine stdout and stderr for complete output
|
|
212
|
+
combined_output = stdout_str
|
|
213
|
+
if stderr_str and "WARNING" not in stderr_str:
|
|
214
|
+
combined_output += f"\n{stderr_str}"
|
|
215
|
+
output_parts.append(
|
|
216
|
+
combined_output.strip() if combined_output.strip() else "Command completed successfully."
|
|
217
|
+
)
|
|
218
|
+
except Exception as e:
|
|
219
|
+
error_msg = f"Error executing '{' '.join(command_parts)}': {str(e)}"
|
|
220
|
+
output_parts.append(error_msg)
|
|
221
|
+
logger.error(error_msg)
|
|
222
|
+
break
|
|
223
|
+
|
|
224
|
+
return "\n".join(output_parts)
|
|
225
|
+
|
|
226
|
+
except Exception as e:
|
|
227
|
+
return f"Error executing command: {e}"
|
|
228
|
+
finally:
|
|
229
|
+
os.chdir(original_cwd)
|
|
230
|
+
|
|
231
|
+
def _get_command_timeout(self, command_parts: List[str]) -> int:
|
|
232
|
+
"""Determine appropriate timeout for command type."""
|
|
233
|
+
if not command_parts:
|
|
234
|
+
return 30
|
|
235
|
+
|
|
236
|
+
base_command = command_parts[0]
|
|
237
|
+
|
|
238
|
+
# Extended timeouts for package management operations
|
|
239
|
+
if base_command in ("pip", "pip3"):
|
|
240
|
+
return 120 # 2 minutes for pip operations
|
|
241
|
+
elif base_command in ("python", "python3"):
|
|
242
|
+
return 60 # 1 minute for python scripts
|
|
243
|
+
else:
|
|
244
|
+
return 30 # 30 seconds for other commands
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
"""Optional comprehensive system prompt for skills-focused agents.
|
|
2
|
+
|
|
3
|
+
This module provides an enhanced, verbose system prompt for agents that are
|
|
4
|
+
heavily focused on skills usage. It is NOT required for basic skills functionality,
|
|
5
|
+
as the SkillsShellTool already includes sufficient guidance in its description.
|
|
6
|
+
|
|
7
|
+
Use this when:
|
|
8
|
+
- You want extremely detailed procedural guidance for the agent
|
|
9
|
+
- The agent's primary purpose is to work with skills
|
|
10
|
+
- You want to emphasize specific workflows or best practices
|
|
11
|
+
|
|
12
|
+
For most use cases, simply adding SkillsShellTool to your agent is sufficient.
|
|
13
|
+
The tool's description already includes all necessary guidance for skills usage.
|
|
14
|
+
|
|
15
|
+
Example usage:
|
|
16
|
+
# Basic usage (recommended for most cases):
|
|
17
|
+
agent = Agent(
|
|
18
|
+
tools=[SkillsShellTool(skills_directory="./skills")]
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
# Enhanced usage (for skills-focused agents):
|
|
22
|
+
agent = Agent(
|
|
23
|
+
instruction=generate_shell_skills_system_prompt("./skills"),
|
|
24
|
+
tools=[SkillsShellTool(skills_directory="./skills")]
|
|
25
|
+
)
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
from __future__ import annotations
|
|
29
|
+
|
|
30
|
+
from pathlib import Path
|
|
31
|
+
from typing import Optional
|
|
32
|
+
|
|
33
|
+
from google.adk.agents.readonly_context import ReadonlyContext
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def generate_shell_skills_system_prompt(
|
|
37
|
+
skills_directory: str | Path, readonly_context: Optional[ReadonlyContext] = None
|
|
38
|
+
) -> str:
|
|
39
|
+
"""Generate a comprehensive, verbose system prompt for shell-based skills usage.
|
|
40
|
+
|
|
41
|
+
This function provides an enhanced system prompt with detailed procedural guidance
|
|
42
|
+
for agents that heavily focus on skills usage. It supplements the guidance already
|
|
43
|
+
present in the SkillsShellTool's description.
|
|
44
|
+
|
|
45
|
+
Note: This is optional. The SkillsShellTool already includes sufficient guidance
|
|
46
|
+
in its description for most use cases.
|
|
47
|
+
|
|
48
|
+
Args:
|
|
49
|
+
skills_directory: Path to directory containing skill folders (currently unused,
|
|
50
|
+
kept for API compatibility)
|
|
51
|
+
readonly_context: Optional context (currently unused, kept for API compatibility)
|
|
52
|
+
|
|
53
|
+
Returns:
|
|
54
|
+
A comprehensive system prompt string with detailed skills usage guidance.
|
|
55
|
+
"""
|
|
56
|
+
prompt = """# Skills System - Two-Tool Architecture
|
|
57
|
+
|
|
58
|
+
You have access to specialized skills through two complementary tools: the `skills` tool and the `bash` tool.
|
|
59
|
+
|
|
60
|
+
## Overview
|
|
61
|
+
|
|
62
|
+
Skills provide specialized domain expertise through instructions, scripts, and reference materials. You access them using a two-phase approach:
|
|
63
|
+
1. **Discovery & Loading**: Use the `skills` tool to invoke a skill and load its instructions
|
|
64
|
+
2. **Execution**: Use the `bash` tool to execute commands based on the skill's guidance
|
|
65
|
+
|
|
66
|
+
## Workflow for User-Uploaded Files
|
|
67
|
+
|
|
68
|
+
When a user uploads a file, it is saved as an artifact. To use it with skills, follow this two-step process:
|
|
69
|
+
|
|
70
|
+
1. **Stage the Artifact:** Use the `stage_artifacts` tool to copy the file from the artifact store to your local `uploads/` directory. The system will tell you the artifact name (e.g., `artifact_...`).
|
|
71
|
+
```
|
|
72
|
+
stage_artifacts(artifact_names=["artifact_..."])
|
|
73
|
+
```
|
|
74
|
+
2. **Use the Staged File:** After staging, the tool will return the new path (e.g., `uploads/artifact_...`). You can now use this path in your `bash` commands.
|
|
75
|
+
```
|
|
76
|
+
bash("python skills/data-analysis/scripts/data_quality_check.py uploads/artifact_...")
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
## Using the Skills Tool
|
|
80
|
+
|
|
81
|
+
The `skills` tool discovers and loads skill instructions:
|
|
82
|
+
|
|
83
|
+
### Discovery
|
|
84
|
+
Available skills are listed in the tool's description under `<available_skills>`. Review these to find relevant capabilities.
|
|
85
|
+
|
|
86
|
+
### Loading a Skill
|
|
87
|
+
Invoke a skill by name to load its full SKILL.md instructions:
|
|
88
|
+
- `skills(command="data-analysis")` - Load data analysis skill
|
|
89
|
+
- `skills(command="pdf-processing")` - Load PDF processing skill
|
|
90
|
+
|
|
91
|
+
When you invoke a skill, you'll see: `<command-message>The "skill-name" skill is loading</command-message>` followed by the skill's complete instructions.
|
|
92
|
+
|
|
93
|
+
## Using the Bash Tool
|
|
94
|
+
|
|
95
|
+
The `bash` tool executes commands in a sandboxed environment. Use it after loading a skill's instructions:
|
|
96
|
+
|
|
97
|
+
### Common Commands
|
|
98
|
+
- `bash("cd skills/SKILL_NAME && python scripts/SCRIPT.py arg1")` - Execute a skill's script
|
|
99
|
+
- `bash("pip install -r skills/SKILL_NAME/requirements.txt")` - Install dependencies
|
|
100
|
+
- `bash("ls skills/SKILL_NAME")` - List skill files
|
|
101
|
+
- `bash("cat skills/SKILL_NAME/reference.md")` - Read additional documentation
|
|
102
|
+
|
|
103
|
+
### Command Chaining
|
|
104
|
+
Chain multiple commands with `&&`:
|
|
105
|
+
```
|
|
106
|
+
bash("cd skills/data-analysis && pip install -r requirements.txt && python scripts/analyze.py data.csv")
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
## Progressive Disclosure Strategy
|
|
110
|
+
|
|
111
|
+
1. **Review Available Skills**: Check the `<available_skills>` section in the skills tool description to find relevant capabilities
|
|
112
|
+
2. **Invoke Relevant Skill**: Use `skills(command="skill-name")` to load full instructions
|
|
113
|
+
3. **Follow Instructions**: Read the loaded SKILL.md carefully
|
|
114
|
+
4. **Execute with Bash**: Use `bash` tool to run commands, install dependencies, and execute scripts as instructed
|
|
115
|
+
|
|
116
|
+
## Best Practices
|
|
117
|
+
|
|
118
|
+
### 1. **Dependency Management**
|
|
119
|
+
- **Before using a script**, check for a `requirements.txt` file
|
|
120
|
+
- Install dependencies with: `bash("pip install -r skills/SKILL_NAME/requirements.txt")`
|
|
121
|
+
|
|
122
|
+
### 2. **Efficient Workflow**
|
|
123
|
+
- Only invoke skills when needed for the task
|
|
124
|
+
- Don't invoke a skill that's already loaded in the conversation
|
|
125
|
+
- Read skill instructions carefully before executing
|
|
126
|
+
|
|
127
|
+
### 3. **Script Usage**
|
|
128
|
+
- **Always** execute scripts from within their skill directory: `bash("cd skills/SKILL_NAME && python scripts/SCRIPT.py")`
|
|
129
|
+
- Check script documentation in the SKILL.md before running
|
|
130
|
+
- Quote paths with spaces: `bash("cd \"path with spaces\" && python script.py")`
|
|
131
|
+
|
|
132
|
+
### 4. **Error Handling**
|
|
133
|
+
- If a bash command fails, read the error message carefully
|
|
134
|
+
- Check that dependencies are installed
|
|
135
|
+
- Verify file paths are correct
|
|
136
|
+
- Ensure you're in the correct directory
|
|
137
|
+
|
|
138
|
+
## Security and Safety
|
|
139
|
+
|
|
140
|
+
Both tools are sandboxed for safety:
|
|
141
|
+
|
|
142
|
+
**Skills Tool:**
|
|
143
|
+
- Read-only access to skill files
|
|
144
|
+
- No execution capability
|
|
145
|
+
- Only loads documented skills
|
|
146
|
+
|
|
147
|
+
**Bash Tool:**
|
|
148
|
+
- **Safe Commands Only**: Only whitelisted commands like `ls`, `cat`, `grep`, `pip`, and `python` are allowed
|
|
149
|
+
- **No Destructive Changes**: Commands like `rm`, `mv`, or `chmod` are blocked
|
|
150
|
+
- **Directory Restrictions**: You cannot access files outside of the skills directory
|
|
151
|
+
- **Timeout Protection**: Commands limited to 30 seconds
|
|
152
|
+
|
|
153
|
+
## Example Workflow
|
|
154
|
+
|
|
155
|
+
User asks: "Analyze this CSV file"
|
|
156
|
+
|
|
157
|
+
1. **Review Skills**: Check `<available_skills>` in skills tool → See "data-analysis" skill
|
|
158
|
+
2. **Invoke Skill**: `skills(command="data-analysis")` → Receive full instructions
|
|
159
|
+
3. **Stage File**: `stage_artifacts(artifact_names=["artifact_123"])` → File at `uploads/artifact_123`
|
|
160
|
+
4. **Install Deps**: `bash("pip install -r skills/data-analysis/requirements.txt")` → Dependencies installed
|
|
161
|
+
5. **Execute Script**: `bash("cd skills/data-analysis && python scripts/analyze.py uploads/artifact_123")` → Get results
|
|
162
|
+
6. **Present Results**: Share analysis with user
|
|
163
|
+
|
|
164
|
+
Remember: Skills are your specialized knowledge repositories. Use the skills tool to discover and load them, then use the bash tool to execute their instructions."""
|
|
165
|
+
return prompt
|
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
"""Tool for discovering and loading skills."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any, Dict, Optional
|
|
8
|
+
|
|
9
|
+
import yaml
|
|
10
|
+
from google.adk.tools import BaseTool, ToolContext
|
|
11
|
+
from google.genai import types
|
|
12
|
+
from pydantic import BaseModel
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger("kagent_adk." + __name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class Skill(BaseModel):
    """Represents the metadata for a skill.

    This is a simple data container used during the initial skill discovery
    phase to hold the information parsed from a skill's SKILL.md frontmatter.

    NOTE(review): the frontmatter parser in this module extracts only ``name``
    and ``description``; ``license`` appears unpopulated by it — confirm intent.
    """

    name: str
    """The unique name/identifier of the skill."""

    description: str
    """A description of what the skill does and when to use it."""

    license: Optional[str] = None
    """Optional license information for the skill."""
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class SkillsTool(BaseTool):
    """Discover and load skill instructions.

    This tool dynamically discovers available skills and embeds their metadata in the
    tool description. Agent invokes a skill by name to load its full instructions.
    """

    def __init__(self, skills_directory: str | Path):
        """Initialize the tool and embed discovered skills in its description.

        Args:
            skills_directory: Directory containing one sub-directory per skill,
                each holding a SKILL.md file with YAML frontmatter.

        Raises:
            ValueError: If the skills directory does not exist.
        """
        self.skills_directory = Path(skills_directory).resolve()
        if not self.skills_directory.exists():
            raise ValueError(f"Skills directory does not exist: {self.skills_directory}")

        # Lazy cache: skill name -> formatted SKILL.md content.
        self._skill_cache: Dict[str, str] = {}

        # Generate description with available skills embedded.
        description = self._generate_description_with_skills()

        super().__init__(
            name="skills",
            description=description,
        )

    @staticmethod
    def _xml_escape(text: str) -> str:
        """Escape XML special characters so skill metadata cannot break or
        inject content into the <available_skills> markup ('&' first)."""
        return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")

    def _generate_description_with_skills(self) -> str:
        """Generate tool description with available skills embedded."""
        base_description = (
            "Execute a skill within the main conversation\n\n"
            "<skills_instructions>\n"
            "When users ask you to perform tasks, check if any of the available skills below can help "
            "complete the task more effectively. Skills provide specialized capabilities and domain knowledge.\n\n"
            "How to use skills:\n"
            "- Invoke skills using this tool with the skill name only (no arguments)\n"
            "- When you invoke a skill, the skill's full SKILL.md will load with detailed instructions\n"
            "- Follow the skill's instructions and use the bash tool to execute commands\n"
            "- Examples:\n"
            '  - command: "data-analysis" - invoke the data-analysis skill\n'
            '  - command: "pdf-processing" - invoke the pdf-processing skill\n\n'
            "Important:\n"
            "- Only use skills listed in <available_skills> below\n"
            "- Do not invoke a skill that is already loaded in the conversation\n"
            "- After loading a skill, use the bash tool for execution\n"
            "</skills_instructions>\n\n"
        )

        # Discover and append available skills.
        skills_xml = self._discover_skills()
        return base_description + skills_xml

    def _discover_skills(self) -> str:
        """Discover available skills and format them as an XML listing.

        Scans each sub-directory of the skills directory for a SKILL.md file
        and renders one <skill> entry per skill with parseable frontmatter.
        """
        if not self.skills_directory.exists():
            return "<available_skills>\n<!-- No skills directory found -->\n</available_skills>\n"

        skills_entries = []
        for skill_dir in sorted(self.skills_directory.iterdir()):
            if not skill_dir.is_dir():
                continue

            skill_file = skill_dir / "SKILL.md"
            if not skill_file.exists():
                continue

            try:
                metadata = self._parse_skill_metadata(skill_file)
                if metadata:
                    # Escape metadata so names/descriptions containing XML
                    # special characters cannot corrupt the listing.
                    skill_xml = (
                        "<skill>\n"
                        f"<name>{self._xml_escape(str(metadata['name']))}</name>\n"
                        f"<description>{self._xml_escape(str(metadata['description']))}</description>\n"
                        "</skill>"
                    )
                    skills_entries.append(skill_xml)
            except Exception as e:
                logger.error(f"Failed to parse skill {skill_dir.name}: {e}")

        if not skills_entries:
            return "<available_skills>\n<!-- No skills found -->\n</available_skills>\n"

        return "<available_skills>\n" + "\n".join(skills_entries) + "\n</available_skills>\n"

    def _get_declaration(self) -> types.FunctionDeclaration:
        """Describe the single required 'command' parameter to the model."""
        return types.FunctionDeclaration(
            name=self.name,
            description=self.description,
            parameters=types.Schema(
                type=types.Type.OBJECT,
                properties={
                    "command": types.Schema(
                        type=types.Type.STRING,
                        description='The skill name (no arguments). E.g., "data-analysis" or "pdf-processing"',
                    ),
                },
                required=["command"],
            ),
        )

    async def run_async(self, *, args: Dict[str, Any], tool_context: ToolContext) -> str:
        """Execute skill loading by name.

        Args:
            args: Tool-call arguments; expects a 'command' string naming a skill.
            tool_context: ADK tool context (unused here).

        Returns:
            The formatted skill content, or a human-readable error string.
        """
        # Coalesce before strip(): an explicit null argument would otherwise
        # raise AttributeError on None.strip().
        skill_name = (args.get("command") or "").strip()

        if not skill_name:
            return "Error: No skill name provided"

        return self._invoke_skill(skill_name)

    def _invoke_skill(self, skill_name: str) -> str:
        """Load and return the full content of a skill."""
        # Check cache first.
        if skill_name in self._skill_cache:
            return self._skill_cache[skill_name]

        # Resolve and require the skill to be a *direct child* of the skills
        # directory. This blocks path traversal via model-supplied names such
        # as "../secrets" (skill_name comes from untrusted LLM output).
        skill_dir = (self.skills_directory / skill_name).resolve()
        if skill_dir.parent != self.skills_directory or not skill_dir.is_dir():
            return f"Error: Skill '{skill_name}' not found. Check the available skills list in the tool description."

        skill_file = skill_dir / "SKILL.md"
        if not skill_file.exists():
            return f"Error: Skill '{skill_name}' has no SKILL.md file."

        try:
            with open(skill_file, "r", encoding="utf-8") as f:
                content = f.read()

            formatted_content = self._format_skill_content(skill_name, content)

            # Cache the formatted content.
            self._skill_cache[skill_name] = formatted_content

            return formatted_content

        except Exception as e:
            logger.error(f"Failed to load skill {skill_name}: {e}")
            return f"Error loading skill '{skill_name}': {e}"

    def _parse_skill_metadata(self, skill_file: Path) -> Dict[str, str] | None:
        """Parse YAML frontmatter from a SKILL.md file.

        Returns:
            A dict with 'name' and 'description' when both keys are present in
            the frontmatter, otherwise None.
        """
        try:
            with open(skill_file, "r", encoding="utf-8") as f:
                content = f.read()

            if not content.startswith("---"):
                return None

            # Frontmatter is delimited by the first two '---' markers.
            parts = content.split("---", 2)
            if len(parts) < 3:
                return None

            metadata = yaml.safe_load(parts[1])
            if isinstance(metadata, dict) and "name" in metadata and "description" in metadata:
                return {
                    "name": metadata["name"],
                    "description": metadata["description"],
                }
            return None
        except Exception as e:
            logger.error(f"Failed to parse metadata from {skill_file}: {e}")
            return None

    def _format_skill_content(self, skill_name: str, content: str) -> str:
        """Format skill content for display to the agent."""
        header = (
            f'<command-message>The "{skill_name}" skill is loading</command-message>\n\n'
            f"Base directory for this skill: skills/{skill_name}\n\n"
        )
        footer = (
            "\n\n---\n"
            "The skill has been loaded. Follow the instructions above and use the bash tool to execute commands."
        )
        return header + content + footer
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
# Copyright 2025 Google LLC
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
import logging
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from typing import Optional
|
|
20
|
+
|
|
21
|
+
from google.adk.agents import BaseAgent, LlmAgent
|
|
22
|
+
from google.adk.agents.callback_context import CallbackContext
|
|
23
|
+
from google.adk.plugins import BasePlugin
|
|
24
|
+
from google.genai import types
|
|
25
|
+
|
|
26
|
+
from .bash_tool import BashTool
|
|
27
|
+
from .skill_tool import SkillsTool
|
|
28
|
+
|
|
29
|
+
logger = logging.getLogger("kagent_adk." + __name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class SkillsPlugin(BasePlugin):
    """Convenience plugin that auto-registers the Skills tools on LLM agents.

    This is a thin wrapper: before each LLM agent runs, it appends a shared
    ``SkillsTool`` and ``BashTool`` to the agent's tools list (skipping tools
    the agent already has). It adds no functionality beyond registration, so
    for single-agent apps — or when you want explicit control — simply list
    both tools on the agent directly instead of using this plugin.

    Example:
        # Without plugin (direct tool usage):
        agent = Agent(
            tools=[
                SkillsTool(skills_directory="./skills"),
                BashTool(skills_directory="./skills"),
            ]
        )

        # With plugin (auto-registration for multi-agent apps):
        app = App(
            root_agent=agent,
            plugins=[SkillsPlugin(skills_directory="./skills")]
        )
    """

    def __init__(self, skills_directory: str | Path, name: str = "skills_plugin"):
        """Create the plugin and the shared tool instances.

        Args:
            skills_directory: Path to directory containing skill folders.
            name: Name of the plugin instance.
        """
        super().__init__(name)
        self.skills_directory = Path(skills_directory)
        self.skills_invoke_tool = SkillsTool(skills_directory)
        self.bash_tool = BashTool(skills_directory)

    async def before_agent_callback(
        self, *, agent: BaseAgent, callback_context: CallbackContext
    ) -> Optional[types.Content]:
        """Attach the skills and bash tools to the agent when missing."""
        # Only LLM agents carry a tools list we can extend.
        if not isinstance(agent, LlmAgent):
            return None

        registered = {getattr(tool, "name", None) for tool in agent.tools}

        # Register each shared tool unless a tool with that name is present.
        for tool_name, tool, label in (
            ("skills", self.skills_invoke_tool, "skills invoke"),
            ("bash", self.bash_tool, "bash"),
        ):
            if tool_name not in registered:
                agent.tools.append(tool)
                logger.debug(f"Added {label} tool to agent: {agent.name}")

        return None
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import List, Optional
|
|
6
|
+
|
|
7
|
+
try:
|
|
8
|
+
from typing_extensions import override
|
|
9
|
+
except ImportError:
|
|
10
|
+
from typing import override
|
|
11
|
+
|
|
12
|
+
from google.adk.agents.readonly_context import ReadonlyContext
|
|
13
|
+
from google.adk.tools import BaseTool
|
|
14
|
+
from google.adk.tools.base_toolset import BaseToolset
|
|
15
|
+
|
|
16
|
+
from .bash_tool import BashTool
|
|
17
|
+
from .skill_tool import SkillsTool
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger("kagent_adk." + __name__)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class SkillsToolset(BaseToolset):
    """Toolset exposing Skills functionality via two focused tools.

    Skills access follows progressive disclosure and is split across two
    complementary tools:
    1. SkillsTool - discover and load skill instructions
    2. BashTool - execute commands based on skill guidance

    Keeping discovery ("what can I do?") separate from execution ("how do I
    do it?") gives each tool a clear semantic role.
    """

    def __init__(self, skills_directory: str | Path):
        """Initialize the skills toolset.

        Args:
            skills_directory: Path to directory containing skill folders.
        """
        super().__init__()
        self.skills_directory = Path(skills_directory)

        # Both tools share the same skills directory.
        self.skills_invoke_tool = SkillsTool(skills_directory)
        self.bash_tool = BashTool(skills_directory)

    @override
    async def get_tools(self, readonly_context: Optional[ReadonlyContext] = None) -> List[BaseTool]:
        """Return the discovery tool followed by the execution tool.

        Returns:
            List containing SkillsTool and BashTool.
        """
        return [self.skills_invoke_tool, self.bash_tool]
|
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import os
|
|
5
|
+
import tempfile
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any, List
|
|
8
|
+
|
|
9
|
+
from typing_extensions import override
|
|
10
|
+
|
|
11
|
+
from google.adk.tools import BaseTool, ToolContext
|
|
12
|
+
from google.genai import types
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger("kagent_adk." + __name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def get_session_staging_path(session_id: str, app_name: str, skills_directory: Path) -> Path:
    """Creates (if needed) and returns the path to a session's staging directory.

    This function provides a consistent, isolated filesystem environment for each
    session. It creates a root directory for the session and populates it with
    an 'uploads' folder and a symlink to the static 'skills' directory.

    NOTE(review): session_id and app_name are used verbatim as path components;
    callers are assumed to pass identifiers free of path separators — confirm.

    Args:
        session_id: The unique ID of the current session.
        app_name: The name of the application, used for namespacing.
        skills_directory: The path to the static skills directory.

    Returns:
        The resolved path to the session's root staging directory.
    """
    base_path = Path(tempfile.gettempdir()) / "adk_sessions" / app_name
    session_path = base_path / session_id

    # Create the session root and its uploads directory in one call.
    (session_path / "uploads").mkdir(parents=True, exist_ok=True)

    # Symlink the static skills directory into the session directory.
    if skills_directory and skills_directory.exists():
        skills_symlink = session_path / "skills"
        # Check is_symlink() as well as exists(): exists() follows the link, so
        # a previously-created but now-broken symlink would pass the check and
        # make os.symlink raise FileExistsError on every subsequent call.
        if not (skills_symlink.is_symlink() or skills_symlink.exists()):
            try:
                os.symlink(
                    skills_directory.resolve(),
                    skills_symlink,
                    target_is_directory=True,
                )
            except OSError as e:
                logger.error(f"Failed to create skills symlink: {e}")

    return session_path.resolve()
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class StageArtifactsTool(BaseTool):
    """A tool to stage artifacts from the artifact service to the local filesystem.

    This tool bridges the gap between the artifact store and the skills system,
    enabling skills to work with user-uploaded files through a two-phase workflow:
    1. Stage: Copy artifacts from artifact store to local 'uploads/' directory
    2. Execute: Use the staged files in bash commands with skills

    This is essential for the skills workflow where user-uploaded files must be
    accessible to skill scripts and commands.
    """

    def __init__(self, skills_directory: Path):
        """Initialize the tool.

        Args:
            skills_directory: Static skills directory, symlinked into each
                session's staging area.
        """
        super().__init__(
            name="stage_artifacts",
            description=(
                "Stage artifacts from the artifact store to a local filesystem path, "
                "making them available for use with skills and the bash tool.\n\n"
                "WORKFLOW:\n"
                "1. When a user uploads a file, it's stored as an artifact (e.g., 'artifact_xyz')\n"
                "2. Use this tool to copy the artifact to your local 'uploads/' directory\n"
                "3. Then reference the staged file path in bash commands\n\n"
                "USAGE EXAMPLE:\n"
                "- stage_artifacts(artifact_names=['artifact_xyz'])\n"
                "  Returns: 'Successfully staged 1 artifact(s) to: uploads/artifact_xyz'\n"
                "- Use the returned path in bash: bash('python skills/data-analysis/scripts/process.py uploads/artifact_xyz')\n\n"
                "PARAMETERS:\n"
                "- artifact_names: List of artifact names to stage (required)\n"
                "- destination_path: Target directory within session (default: 'uploads/')\n\n"
                "BEST PRACTICES:\n"
                "- Always stage artifacts before using them in skills\n"
                "- Use default 'uploads/' destination for consistency\n"
                "- Stage all artifacts at the start of your workflow\n"
                "- Check returned paths to confirm successful staging"
            ),
        )
        self._skills_directory = skills_directory

    def _get_declaration(self) -> types.FunctionDeclaration | None:
        """Describe the tool parameters to the model."""
        return types.FunctionDeclaration(
            name=self.name,
            description=self.description,
            parameters=types.Schema(
                type=types.Type.OBJECT,
                properties={
                    "artifact_names": types.Schema(
                        type=types.Type.ARRAY,
                        description=(
                            "List of artifact names to stage. These are artifact identifiers "
                            "provided by the system when files are uploaded (e.g., 'artifact_abc123'). "
                            "The tool will copy each artifact from the artifact store to the destination directory."
                        ),
                        items=types.Schema(type=types.Type.STRING),
                    ),
                    "destination_path": types.Schema(
                        type=types.Type.STRING,
                        description=(
                            "Relative path within the session directory to save the files. "
                            "Default is 'uploads/' where user-uploaded files are conventionally stored. "
                            "Path must be within the session directory for security. "
                            "Useful for organizing different types of artifacts (e.g., 'uploads/input/', 'uploads/processed/')."
                        ),
                        default="uploads/",
                    ),
                },
                required=["artifact_names"],
            ),
        )

    @override
    async def run_async(self, *, args: dict[str, Any], tool_context: ToolContext) -> str:
        """Copy the named artifacts into the session staging directory.

        Args:
            args: Tool-call arguments ('artifact_names', optional 'destination_path').
            tool_context: ADK tool context providing the artifact service and session.

        Returns:
            A human-readable summary of the staged paths, or an error string.
        """
        artifact_names: List[str] = args.get("artifact_names", [])
        destination_path_str: str = args.get("destination_path", "uploads/")

        if not tool_context._invocation_context.artifact_service:
            return "Error: Artifact service is not available in this context."

        try:
            staging_root = get_session_staging_path(
                session_id=tool_context.session.id,
                app_name=tool_context._invocation_context.app_name,
                skills_directory=self._skills_directory,
            )
            destination_dir = (staging_root / destination_path_str).resolve()

            # Security: Ensure the destination is within the staging path.
            if staging_root not in destination_dir.parents and destination_dir != staging_root:
                return f"Error: Invalid destination path '{destination_path_str}'."

            destination_dir.mkdir(parents=True, exist_ok=True)

            output_paths = []
            for name in artifact_names:
                artifact = await tool_context.load_artifact(name)
                if artifact is None or artifact.inline_data is None:
                    logger.warning('Artifact "%s" not found or has no data, skipping', name)
                    continue

                # Security: use only the final path component of the artifact
                # name, so a crafted name (e.g. '../../x') cannot write outside
                # the destination directory that was validated above.
                safe_name = Path(name).name
                if not safe_name:
                    logger.warning('Artifact "%s" has no usable filename, skipping', name)
                    continue

                output_file = destination_dir / safe_name
                output_file.write_bytes(artifact.inline_data.data)
                relative_path = output_file.relative_to(staging_root)
                output_paths.append(str(relative_path))

            if not output_paths:
                return "No valid artifacts were staged."

            return f"Successfully staged {len(output_paths)} artifact(s) to: {', '.join(output_paths)}"

        except Exception as e:
            logger.error("Error staging artifacts: %s", e, exc_info=True)
            return f"An error occurred while staging artifacts: {e}"
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
kagent/adk/__init__.py,sha256=Cam9hwhl6Z1tZ3WJIB1KATX24MwRMpiyBLYF64DTqQI,182
|
|
2
|
+
kagent/adk/_a2a.py,sha256=I95C9VgjFH7D15uOhKRexNbMqUrOTNtWgvH_dcq02kU,6568
|
|
3
|
+
kagent/adk/_agent_executor.py,sha256=J3Th_wLUN-vhd6GlunEwSffurIvx2QWnYZHCWpxNAbM,11469
|
|
4
|
+
kagent/adk/_session_service.py,sha256=6kwFS4FF10HlUyyCV2SgwxqKIJ47s0ZZ8KW-iIRDyec,5858
|
|
5
|
+
kagent/adk/_token.py,sha256=OL46m7U5vUTby1WWjVB7Jqzig4TWddzoAmLVLlfSdAg,2515
|
|
6
|
+
kagent/adk/cli.py,sha256=u-Yo8kflxUr_GzVa-v-g1FcZePLztO41_HrjCJPYsc4,3348
|
|
7
|
+
kagent/adk/types.py,sha256=kcNpRwo6XcfMtgsOchgDuslYjMY-7pSlVnglihoUKss,5685
|
|
8
|
+
kagent/adk/converters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
9
|
+
kagent/adk/converters/error_mappings.py,sha256=1KUJPS8VrcaTv6yUKb5Whg-S2XX8YGJmtTIeZqnqvuw,2769
|
|
10
|
+
kagent/adk/converters/event_converter.py,sha256=WKQRqREB11TbgGp6U_--mmukvJJgew6-VEkrGBqGVA4,10519
|
|
11
|
+
kagent/adk/converters/part_converter.py,sha256=8Ej9xGRYW8YoPnExGDnEUw1beurCfkNhAvFa-LE_-VM,7512
|
|
12
|
+
kagent/adk/converters/request_converter.py,sha256=iTmTmhlnyRfuYyFi4WmpTSXPz22xjjotbe750j-CvYA,1072
|
|
13
|
+
kagent/adk/models/__init__.py,sha256=mqD0JhS9kT1rMpFNLq5-qnjstpp6lzT9xADaOfjrUKY,78
|
|
14
|
+
kagent/adk/models/_openai.py,sha256=EpZTqxAEaKhgi-98sqyQhArckWwoGh-C34fC8MyClHk,17187
|
|
15
|
+
kagent/adk/skills/README.md,sha256=L9ssnaj6FjSrlyZrhy6yEydg1Bo0mkD8MnTZemw7SWY,5580
|
|
16
|
+
kagent/adk/skills/__init__.py,sha256=mAp2RBuTkvI-sMcqbAe0lpTuJF-lLmqH3xTBwnNFicE,1005
|
|
17
|
+
kagent/adk/skills/bash_tool.py,sha256=AeIg_sXERZCfHwNGSfUUTQhWJpZEZG3ynUFYuBz4eBA,9990
|
|
18
|
+
kagent/adk/skills/skill_system_prompt.py,sha256=AU6xbpg7iHDbaOyD6y_HO5ahCPW5Wt0qlGtG91C5GDk,7183
|
|
19
|
+
kagent/adk/skills/skill_tool.py,sha256=G9AQ9L66RoZnEzoG-rpQinktS8LNJqQeyJLNtfRbklw,7786
|
|
20
|
+
kagent/adk/skills/skills_plugin.py,sha256=Uc9i2zOKUro-3h9jmX_FNbZQK9Tk7bRc2f3yt5ZmsgQ,3198
|
|
21
|
+
kagent/adk/skills/skills_toolset.py,sha256=o3qNUyEhypG27C2c4qjfRJKOP09kaF5ufIvWha28_k8,1713
|
|
22
|
+
kagent/adk/skills/stage_artifacts_tool.py,sha256=Wg53SH2MRCR3dsDlb2vLFqNGufdRHX5zjHM2BmxqXxs,7495
|
|
23
|
+
kagent_adk-0.7.2.dist-info/METADATA,sha256=31-vWsw4yZkA37vkM2a3izkvhfnP79F5GC9lL1kzppE,1013
|
|
24
|
+
kagent_adk-0.7.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
25
|
+
kagent_adk-0.7.2.dist-info/entry_points.txt,sha256=a1Q2Inc9L0dvXWEkwnCdf9cfXdpX5Dl2Q6DhNWNjhxw,50
|
|
26
|
+
kagent_adk-0.7.2.dist-info/RECORD,,
|
|
@@ -1,18 +0,0 @@
|
|
|
1
|
-
kagent/adk/__init__.py,sha256=Cam9hwhl6Z1tZ3WJIB1KATX24MwRMpiyBLYF64DTqQI,182
|
|
2
|
-
kagent/adk/_a2a.py,sha256=oSpj5fAzPPA_KP5fTNLUpFSQ2J28tLXrTCBpQSNCjq0,6309
|
|
3
|
-
kagent/adk/_agent_executor.py,sha256=U_ZxJhaGyn-FPxvoNn7L5ZQComZ-vfFG9TdCWRLoTm8,11469
|
|
4
|
-
kagent/adk/_session_service.py,sha256=6kwFS4FF10HlUyyCV2SgwxqKIJ47s0ZZ8KW-iIRDyec,5858
|
|
5
|
-
kagent/adk/_token.py,sha256=OL46m7U5vUTby1WWjVB7Jqzig4TWddzoAmLVLlfSdAg,2515
|
|
6
|
-
kagent/adk/cli.py,sha256=_yluK6p7sbUld5juWCyIu3E8U1_Zys7eWxBabcHXXIo,3251
|
|
7
|
-
kagent/adk/types.py,sha256=kcNpRwo6XcfMtgsOchgDuslYjMY-7pSlVnglihoUKss,5685
|
|
8
|
-
kagent/adk/converters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
9
|
-
kagent/adk/converters/error_mappings.py,sha256=1KUJPS8VrcaTv6yUKb5Whg-S2XX8YGJmtTIeZqnqvuw,2769
|
|
10
|
-
kagent/adk/converters/event_converter.py,sha256=WKQRqREB11TbgGp6U_--mmukvJJgew6-VEkrGBqGVA4,10519
|
|
11
|
-
kagent/adk/converters/part_converter.py,sha256=8Ej9xGRYW8YoPnExGDnEUw1beurCfkNhAvFa-LE_-VM,7512
|
|
12
|
-
kagent/adk/converters/request_converter.py,sha256=iTmTmhlnyRfuYyFi4WmpTSXPz22xjjotbe750j-CvYA,1072
|
|
13
|
-
kagent/adk/models/__init__.py,sha256=mqD0JhS9kT1rMpFNLq5-qnjstpp6lzT9xADaOfjrUKY,78
|
|
14
|
-
kagent/adk/models/_openai.py,sha256=EpZTqxAEaKhgi-98sqyQhArckWwoGh-C34fC8MyClHk,17187
|
|
15
|
-
kagent_adk-0.7.1.dist-info/METADATA,sha256=SwjFy_fdXmSgvtWm1W3p-l9LRnZqL6mubQKPnuV4Asw,1013
|
|
16
|
-
kagent_adk-0.7.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
17
|
-
kagent_adk-0.7.1.dist-info/entry_points.txt,sha256=a1Q2Inc9L0dvXWEkwnCdf9cfXdpX5Dl2Q6DhNWNjhxw,50
|
|
18
|
-
kagent_adk-0.7.1.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|