kagent-skills 0.7.13__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kagent_skills-0.7.13/PKG-INFO +19 -0
- kagent_skills-0.7.13/README.md +5 -0
- kagent_skills-0.7.13/pyproject.toml +35 -0
- kagent_skills-0.7.13/setup.cfg +4 -0
- kagent_skills-0.7.13/src/kagent/skills/__init__.py +38 -0
- kagent_skills-0.7.13/src/kagent/skills/discovery.py +80 -0
- kagent_skills-0.7.13/src/kagent/skills/models.py +20 -0
- kagent_skills-0.7.13/src/kagent/skills/prompts.py +116 -0
- kagent_skills-0.7.13/src/kagent/skills/session.py +97 -0
- kagent_skills-0.7.13/src/kagent/skills/shell.py +175 -0
- kagent_skills-0.7.13/src/kagent/tests/unittests/test_skill_execution.py +142 -0
- kagent_skills-0.7.13/src/kagent_skills.egg-info/PKG-INFO +19 -0
- kagent_skills-0.7.13/src/kagent_skills.egg-info/SOURCES.txt +14 -0
- kagent_skills-0.7.13/src/kagent_skills.egg-info/dependency_links.txt +1 -0
- kagent_skills-0.7.13/src/kagent_skills.egg-info/requires.txt +8 -0
- kagent_skills-0.7.13/src/kagent_skills.egg-info/top_level.txt +1 -0
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: kagent-skills
|
|
3
|
+
Version: 0.7.13
|
|
4
|
+
Summary: Core library for discovering and loading KAgent skills.
|
|
5
|
+
Requires-Python: >=3.13
|
|
6
|
+
Description-Content-Type: text/markdown
|
|
7
|
+
Requires-Dist: pydantic>=2.0.0
|
|
8
|
+
Requires-Dist: pyyaml>=6.0
|
|
9
|
+
Provides-Extra: dev
|
|
10
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
11
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
12
|
+
Requires-Dist: black>=23.0.0; extra == "dev"
|
|
13
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
14
|
+
|
|
15
|
+
# KAgent Skills
|
|
16
|
+
|
|
17
|
+
Core library for discovering, parsing, and loading KAgent skills from the filesystem.
|
|
18
|
+
|
|
19
|
+
For example usage, see `kagent-adk` and `kagent-openai` packages.
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "kagent-skills"
|
|
3
|
+
version = "0.7.13"
|
|
4
|
+
description = "Core library for discovering and loading KAgent skills."
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
requires-python = ">=3.13"
|
|
7
|
+
dependencies = [
|
|
8
|
+
"pydantic>=2.0.0",
|
|
9
|
+
"pyyaml>=6.0"
|
|
10
|
+
]
|
|
11
|
+
|
|
12
|
+
[project.optional-dependencies]
|
|
13
|
+
dev = [
|
|
14
|
+
"pytest>=7.0.0",
|
|
15
|
+
"pytest-asyncio>=0.21.0",
|
|
16
|
+
"black>=23.0.0",
|
|
17
|
+
"ruff>=0.1.0",
|
|
18
|
+
]
|
|
19
|
+
|
|
20
|
+
[build-system]
|
|
21
|
+
requires = ["setuptools>=61.0", "wheel"]
|
|
22
|
+
build-backend = "setuptools.build_meta"
|
|
23
|
+
|
|
24
|
+
# NOTE: build-backend is setuptools.build_meta, so this hatch-specific
# section is inert; remove it or switch the backend to hatchling.
[tool.hatch.build.targets.wheel]
packages = ["src/kagent"]
|
|
26
|
+
|
|
27
|
+
[tool.black]
|
|
28
|
+
line-length = 120
|
|
29
|
+
target-version = ['py313']
|
|
30
|
+
|
|
31
|
+
[tool.ruff]
|
|
32
|
+
line-length = 120
|
|
33
|
+
target-version = "py313"
|
|
34
|
+
lint.select = ["E", "F", "I", "N", "W", "UP"]
|
|
35
|
+
lint.ignore = ["E501", "N803", "N806"]
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
from .discovery import discover_skills, load_skill_content
|
|
2
|
+
from .models import Skill
|
|
3
|
+
from .prompts import (
|
|
4
|
+
generate_skills_tool_description,
|
|
5
|
+
get_bash_description,
|
|
6
|
+
get_edit_file_description,
|
|
7
|
+
get_read_file_description,
|
|
8
|
+
get_write_file_description,
|
|
9
|
+
)
|
|
10
|
+
from .session import (
|
|
11
|
+
clear_session_cache,
|
|
12
|
+
get_session_path,
|
|
13
|
+
initialize_session_path,
|
|
14
|
+
)
|
|
15
|
+
from .shell import (
|
|
16
|
+
edit_file_content,
|
|
17
|
+
execute_command,
|
|
18
|
+
read_file_content,
|
|
19
|
+
write_file_content,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
__all__ = [
|
|
23
|
+
"discover_skills",
|
|
24
|
+
"load_skill_content",
|
|
25
|
+
"Skill",
|
|
26
|
+
"read_file_content",
|
|
27
|
+
"write_file_content",
|
|
28
|
+
"edit_file_content",
|
|
29
|
+
"execute_command",
|
|
30
|
+
"generate_skills_tool_description",
|
|
31
|
+
"get_read_file_description",
|
|
32
|
+
"get_write_file_description",
|
|
33
|
+
"get_edit_file_description",
|
|
34
|
+
"get_bash_description",
|
|
35
|
+
"initialize_session_path",
|
|
36
|
+
"get_session_path",
|
|
37
|
+
"clear_session_cache",
|
|
38
|
+
]
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
import yaml
|
|
7
|
+
|
|
8
|
+
from .models import Skill
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def parse_skill_metadata(skill_file: Path) -> dict[str, str] | None:
    """Extract the YAML frontmatter of a SKILL.md file.

    Returns a dict with the "name" and "description" keys when the file
    begins with a well-formed ``--- ... ---`` frontmatter block containing
    both fields; otherwise returns None. Any read/parse failure is logged
    and reported as None rather than raised.
    """
    try:
        raw = skill_file.read_text(encoding="utf-8")

        if not raw.startswith("---"):
            return None

        sections = raw.split("---", 2)
        if len(sections) < 3:
            return None

        frontmatter = yaml.safe_load(sections[1])
        if not isinstance(frontmatter, dict):
            return None
        if "name" not in frontmatter or "description" not in frontmatter:
            return None
        return {
            "name": frontmatter["name"],
            "description": frontmatter["description"],
        }
    except Exception as e:
        logger.error(f"Failed to parse metadata from {skill_file}: {e}")
        return None
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def discover_skills(skills_directory: Path) -> list[Skill]:
    """Scan *skills_directory* and return metadata for every valid skill.

    A valid skill is a subdirectory containing a SKILL.md whose frontmatter
    provides both a name and a description. Invalid entries are logged and
    skipped rather than aborting the scan; a missing directory yields [].
    """
    if not skills_directory.exists():
        logger.warning(f"Skills directory not found: {skills_directory}")
        return []

    discovered: list[Skill] = []
    for candidate in sorted(skills_directory.iterdir()):
        manifest = candidate / "SKILL.md"
        # Only directories that actually ship a SKILL.md are skills.
        if not candidate.is_dir() or not manifest.exists():
            continue

        try:
            metadata = parse_skill_metadata(manifest)
            if metadata:
                discovered.append(Skill(**metadata))
        except Exception as e:
            logger.error(f"Failed to parse skill {candidate.name}: {e}")

    return discovered
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def load_skill_content(skills_directory: Path, skill_name: str) -> str:
    """Return the full text of ``<skills_directory>/<skill_name>/SKILL.md``.

    Raises:
        FileNotFoundError: if the skill directory or its SKILL.md is missing.
        OSError: if the file exists but cannot be read.
    """
    skill_dir = skills_directory / skill_name
    if not (skill_dir.exists() and skill_dir.is_dir()):
        raise FileNotFoundError(f"Skill '{skill_name}' not found in {skills_directory}")

    skill_file = skill_dir / "SKILL.md"
    if not skill_file.exists():
        raise FileNotFoundError(f"Skill '{skill_name}' has no SKILL.md file in {skill_dir}")

    try:
        return skill_file.read_text(encoding="utf-8")
    except Exception as e:
        logger.error(f"Failed to load skill {skill_name}: {e}")
        raise OSError(f"Error loading skill '{skill_name}': {e}") from e
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class Skill(BaseModel):
    """Represents the metadata for a skill.

    This is a simple data container used during the initial skill discovery
    phase to hold the information parsed from a skill's SKILL.md frontmatter.
    As a pydantic model, ``name`` and ``description`` are required at
    construction time; ``license`` is optional.
    """

    name: str
    """The unique name/identifier of the skill."""

    description: str
    """A description of what the skill does and when to use it."""

    license: str | None = None
    """Optional license information for the skill."""
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
from .models import Skill
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def generate_skills_xml(skills: list[Skill]) -> str:
    """Render *skills* as an <available_skills> XML block.

    Each skill contributes a <skill> element carrying its name and
    description. An empty list yields a placeholder comment so the
    surrounding prompt still contains the <available_skills> tags.
    """
    if not skills:
        return "<available_skills>\n<!-- No skills found -->\n</available_skills>"

    entries = "\n".join(
        f"<skill>\n<name>{skill.name}</name>\n<description>{skill.description}</description>\n</skill>"
        for skill in skills
    )
    return "<available_skills>\n" + entries + "\n</available_skills>"
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def generate_skills_tool_description(skills: list[Skill]) -> str:
    """Generates the full, standardized description for the 'skills' tool.

    Embeds usage instructions plus the <available_skills> XML produced by
    generate_skills_xml(), ready to be attached verbatim to the tool
    definition shown to the model.
    """
    skills_xml = generate_skills_xml(skills)

    # This description is based on the ADK version, which is the source of truth.
    description = f"""Execute a skill within the main conversation

<skills_instructions>
When users ask you to perform tasks, check if any of the available skills below can help complete the task more effectively. Skills provide specialized capabilities and domain knowledge.

How to use skills:
- Invoke skills using this tool with the skill name only (no arguments)
- When you invoke a skill, the skill's full SKILL.md will load with detailed instructions
- Follow the skill's instructions and use the bash tool to execute commands
- Examples:
  - command: \"data-analysis\" - invoke the data-analysis skill
  - command: \"pdf-processing\" - invoke the pdf-processing skill

Important:
- Only use skills listed in <available_skills> below
- Do not invoke a skill that is already loaded in the conversation
- After loading a skill, use the bash tool for execution
- If not specified, scripts are located in the skill-name/scripts subdirectory
</skills_instructions>

{skills_xml}
"""
    return description
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def get_read_file_description() -> str:
    """Return the canonical tool description for read_file."""
    text = """Reads a file from the filesystem with line numbers.

Usage:
- Provide a path to the file (absolute or relative to your working directory)
- Returns content with line numbers (format: LINE_NUMBER|CONTENT)
- Optional offset and limit parameters for reading specific line ranges
- Lines longer than 2000 characters are truncated
- Always read a file before editing it
- You can read from skills/ directory, uploads/, outputs/, or any file in your session
"""
    return text
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def get_write_file_description() -> str:
    """Return the canonical tool description for write_file."""
    text = """Writes content to a file on the filesystem.

Usage:
- Provide a path (absolute or relative to working directory) and content to write
- Overwrites existing files
- Creates parent directories if needed
- For existing files, read them first using read_file
- Prefer editing existing files over writing new ones
- You can write to your working directory, outputs/, or any writable location
- Note: skills/ directory is read-only
"""
    return text
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def get_edit_file_description() -> str:
    """Return the canonical tool description for edit_file."""
    text = """Performs exact string replacements in files.

Usage:
- You must read the file first using read_file
- Provide path (absolute or relative to working directory)
- When editing, preserve exact indentation from the file content
- Do NOT include line number prefixes in old_string or new_string
- old_string must be unique unless replace_all=true
- Use replace_all to rename variables/strings throughout the file
- old_string and new_string must be different
- Note: skills/ directory is read-only
"""
    return text
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def get_bash_description() -> str:
    """Return the canonical tool description for the bash tool."""
    # This combines the useful parts from both ADK and OpenAI descriptions
    text = """Execute bash commands in the skills environment with sandbox protection.

Working Directory & Structure:
- Commands run in a temporary session directory: /tmp/kagent/{session_id}/
- /skills -> All skills are available here (read-only).
- Your current working directory and /skills are added to PYTHONPATH.

Python Imports (CRITICAL):
- To import from a skill, use the name of the skill.
  Example: from skills_name.module import function
- If the skills name contains a dash '-', you need to use importlib to import it.
  Example:
    import importlib
    skill_module = importlib.import_module('skill-name.module')

For file operations:
- Use read_file, write_file, and edit_file for interacting with the filesystem.

Timeouts:
- python scripts: 60s
- other commands: 30s
"""
    return text
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
"""Manages isolated filesystem paths for agent sessions."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import tempfile
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
logger = logging.getLogger(__name__)
|
|
8
|
+
|
|
9
|
+
# Cache of initialized session paths to avoid re-creating symlinks
|
|
10
|
+
_session_path_cache: dict[str, Path] = {}
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def initialize_session_path(session_id: str, skills_directory: str) -> Path:
    """Prepare (once) the working directory tree for a session.

    Layout under ``<tmpdir>/kagent/<session_id>/``:
        skills/  -> symlink to the shared *skills_directory* (read-only)
        uploads/ -> staged user files (temporary)
        outputs/ -> generated files for return

    Results are memoized per session id, so repeated calls are cheap and
    the symlink is only created once.

    Args:
        session_id: The unique ID of the current session.
        skills_directory: Path to the shared skills directory.

    Returns:
        The resolved path to the session's root directory.
    """
    cached = _session_path_cache.get(session_id)
    if cached is not None:
        return cached

    session_path = Path(tempfile.gettempdir()) / "kagent" / session_id

    # Working subdirectories the tools read from / write to.
    for subdir in ("uploads", "outputs"):
        (session_path / subdir).mkdir(parents=True, exist_ok=True)

    # Expose the shared skills directory inside the session via a symlink.
    skills_mount = Path(skills_directory)
    skills_link = session_path / "skills"
    if skills_mount.exists() and not skills_link.exists():
        try:
            skills_link.symlink_to(skills_mount)
            logger.debug(f"Created symlink: {skills_link} -> {skills_mount}")
        except FileExistsError:
            # A concurrent initializer won the race; the link is in place.
            pass
        except Exception as e:
            # Non-fatal: skills remain reachable via their absolute path.
            logger.warning(f"Failed to create skills symlink for session {session_id}: {e}")

    resolved = session_path.resolve()
    _session_path_cache[session_id] = resolved
    return resolved
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def get_session_path(session_id: str) -> Path:
    """Look up a session's working directory, auto-initializing if needed.

    Uninitialized sessions fall back to ``/skills`` as the shared skills
    directory; executors that use a custom skills location must call
    initialize_session_path() themselves before any tool runs.

    Args:
        session_id: The unique ID of the current session.

    Returns:
        The resolved path to the session's root directory.
    """
    cached = _session_path_cache.get(session_id)
    if cached is not None:
        return cached

    logger.warning(
        f"Session {session_id} not initialized. "
        f"Auto-initializing with default /skills. "
        f"For custom skills directories, ensure the executor performs initialization."
    )
    return initialize_session_path(session_id, "/skills")
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def clear_session_cache(session_id: str | None = None) -> None:
    """Drop cached session path(s).

    Args:
        session_id: Specific session to clear. If None (or empty), clears
            all cached sessions.
    """
    if not session_id:
        _session_path_cache.clear()
    else:
        _session_path_cache.pop(session_id, None)
|
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
"""Core, framework-agnostic logic for system tools (file and shell operations)."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import logging
|
|
7
|
+
import os
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# --- File Operation Tools ---
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def read_file_content(
|
|
17
|
+
file_path: Path,
|
|
18
|
+
offset: int | None = None,
|
|
19
|
+
limit: int | None = None,
|
|
20
|
+
) -> str:
|
|
21
|
+
"""Reads a file with line numbers, raising errors on failure."""
|
|
22
|
+
if not file_path.exists():
|
|
23
|
+
raise FileNotFoundError(f"File not found: {file_path}")
|
|
24
|
+
|
|
25
|
+
if not file_path.is_file():
|
|
26
|
+
raise IsADirectoryError(f"Path is not a file: {file_path}")
|
|
27
|
+
|
|
28
|
+
try:
|
|
29
|
+
lines = file_path.read_text(encoding="utf-8").splitlines()
|
|
30
|
+
except Exception as e:
|
|
31
|
+
raise OSError(f"Error reading file {file_path}: {e}") from e
|
|
32
|
+
|
|
33
|
+
start = (offset - 1) if offset and offset > 0 else 0
|
|
34
|
+
end = (start + limit) if limit else len(lines)
|
|
35
|
+
|
|
36
|
+
result_lines = []
|
|
37
|
+
for i, line in enumerate(lines[start:end], start=start + 1):
|
|
38
|
+
if len(line) > 2000:
|
|
39
|
+
line = line[:2000] + "..."
|
|
40
|
+
result_lines.append(f"{i:6d}|{line}")
|
|
41
|
+
|
|
42
|
+
if not result_lines:
|
|
43
|
+
return "File is empty."
|
|
44
|
+
|
|
45
|
+
return "\n".join(result_lines)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def write_file_content(file_path: Path, content: str) -> str:
    """Write *content* to *file_path* as UTF-8, creating parent directories.

    Existing files are overwritten.

    Returns:
        A human-readable success message.

    Raises:
        OSError: if the directories or file cannot be written.
    """
    try:
        file_path.parent.mkdir(parents=True, exist_ok=True)
        file_path.write_text(content, encoding="utf-8")
        message = f"Successfully wrote to {file_path}"
        logger.info(message)
        return message
    except Exception as e:
        raise OSError(f"Error writing file {file_path}: {e}") from e
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def edit_file_content(
    file_path: Path,
    old_string: str,
    new_string: str,
    replace_all: bool = False,
) -> str:
    """Replace occurrences of *old_string* with *new_string* in a file.

    Without *replace_all* the match must be unique; with it, every
    occurrence is rewritten.

    Returns:
        A message reporting how many occurrences were matched.

    Raises:
        ValueError: if the strings are equal, the match is absent, or it is
            ambiguous while replace_all is False.
        FileNotFoundError: if the path does not exist.
        IsADirectoryError: if the path is not a regular file.
        OSError: if the file cannot be read or written.
    """
    if old_string == new_string:
        raise ValueError("old_string and new_string must be different")
    if not file_path.exists():
        raise FileNotFoundError(f"File not found: {file_path}")
    if not file_path.is_file():
        raise IsADirectoryError(f"Path is not a file: {file_path}")

    try:
        text = file_path.read_text(encoding="utf-8")
    except Exception as e:
        raise OSError(f"Error reading file {file_path}: {e}") from e

    count = text.count(old_string)
    if count == 0:
        raise ValueError(f"old_string not found in {file_path}")
    if count > 1 and not replace_all:
        raise ValueError(
            f"old_string appears {count} times in {file_path}. Provide more context or set replace_all=true."
        )

    updated = text.replace(old_string, new_string) if replace_all else text.replace(old_string, new_string, 1)

    try:
        file_path.write_text(updated, encoding="utf-8")
        summary = f"Successfully replaced {count} occurrence(s) in {file_path}"
        logger.info(summary)
        return summary
    except Exception as e:
        raise OSError(f"Error writing file {file_path}: {e}") from e
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
# --- Shell Operation Tools ---
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def _get_command_timeout_seconds(command: str) -> float:
|
|
106
|
+
"""Determine appropriate timeout for a command."""
|
|
107
|
+
if "python " in command or "python3 " in command:
|
|
108
|
+
return 60.0 # 1 minute for python scripts
|
|
109
|
+
else:
|
|
110
|
+
return 30.0 # 30 seconds for other commands
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
async def execute_command(
    command: str,
    working_dir: Path,
) -> str:
    """Executes a shell command in a sandboxed environment.

    The command is wrapped in the external ``srt`` sandbox binary and run
    via the shell with *working_dir* as the CWD. stdout/stderr are captured
    and merged into a single human-readable string; failures (non-zero
    exit, timeout, spawn errors) are reported as "Error..." strings rather
    than raised, so callers always receive text.

    Args:
        command: The shell command line to execute.
        working_dir: Directory to run in; also prepended to PYTHONPATH.

    Returns:
        The command's combined output on success, or an error/timeout
        message string on failure.
    """
    # Python scripts get 60s, everything else 30s (see _get_command_timeout_seconds).
    timeout = _get_command_timeout_seconds(command)

    env = os.environ.copy()
    # Add skills directory and working directory to PYTHONPATH
    pythonpath_additions = [str(working_dir), "/skills"]
    if "PYTHONPATH" in env:
        pythonpath_additions.append(env["PYTHONPATH"])
    env["PYTHONPATH"] = ":".join(pythonpath_additions)

    # If a separate venv for shell commands is specified, use its python and pip
    # Otherwise the system python/pip will be used for backward compatibility
    bash_venv_path = os.environ.get("BASH_VENV_PATH")
    if bash_venv_path:
        bash_venv_bin = os.path.join(bash_venv_path, "bin")
        # Prepend bash venv to PATH so its python and pip are used
        env["PATH"] = f"{bash_venv_bin}:{env.get('PATH', '')}"
        env["VIRTUAL_ENV"] = bash_venv_path

    # NOTE(review): the command is interpolated into a double-quoted srt
    # argument; commands containing '"' would break this quoting — confirm
    # against srt's expected argument format.
    sandboxed_command = f'srt "{command}"'

    try:
        process = await asyncio.create_subprocess_shell(
            sandboxed_command,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            cwd=working_dir,
            env=env,
        )

        try:
            stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=timeout)
        except TimeoutError:
            # Kill the sandboxed process and reap it before reporting the timeout.
            process.kill()
            await process.wait()
            return f"Error: Command timed out after {timeout}s"

        stdout_str = stdout.decode("utf-8", errors="replace") if stdout else ""
        stderr_str = stderr.decode("utf-8", errors="replace") if stderr else ""

        if process.returncode != 0:
            error_msg = f"Command failed with exit code {process.returncode}"
            # Prefer stderr for diagnostics; fall back to stdout.
            if stderr_str:
                error_msg += f":\n{stderr_str}"
            elif stdout_str:
                error_msg += f":\n{stdout_str}"
            return error_msg

        output = stdout_str
        # On success, surface non-warning stderr chatter alongside stdout.
        if stderr_str and "WARNING" not in stderr_str:
            output += f"\n{stderr_str}"

        logger.info(f"Command executed successfully: {output}")

        return output.strip() if output.strip() else "Command completed successfully."

    except Exception as e:
        logger.error(f"Error executing command: {e}")
        return f"Error: {e}"
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import shutil
|
|
3
|
+
import tempfile
|
|
4
|
+
import textwrap
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
import pytest
|
|
8
|
+
|
|
9
|
+
from kagent.skills import (
|
|
10
|
+
discover_skills,
|
|
11
|
+
execute_command,
|
|
12
|
+
load_skill_content,
|
|
13
|
+
read_file_content,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@pytest.fixture
def skill_test_env() -> Path:
    """
    Creates a temporary environment that mimics a real session and ensures cleanup.

    This fixture manually creates and deletes the temporary directory structure
    to guarantee that no files are left behind after the test run.

    Yields:
        The session directory, which contains uploads/, outputs/ and a
        "skills" symlink pointing at a sibling skills_root directory that
        holds one "csv-to-json" skill.
    """
    # 1. Create a single top-level temporary directory
    top_level_dir = Path(tempfile.mkdtemp())

    try:
        session_dir = top_level_dir / "session"
        skills_root_dir = top_level_dir / "skills_root"

        # 2. Create session directories
        (session_dir / "uploads").mkdir(parents=True, exist_ok=True)
        (session_dir / "outputs").mkdir(parents=True, exist_ok=True)

        # 3. Create the skill to be tested
        skill_dir = skills_root_dir / "csv-to-json"
        script_dir = skill_dir / "scripts"
        script_dir.mkdir(parents=True, exist_ok=True)

        # SKILL.md — frontmatter plus instructions, as discover_skills expects.
        (skill_dir / "SKILL.md").write_text(
            textwrap.dedent("""\
                ---
                name: csv-to-json
                description: Converts a CSV file to a JSON file.
                ---
                # CSV to JSON Conversion
                Use the `convert.py` script to convert a CSV file from the `uploads` directory
                to a JSON file in the `outputs` directory.
                Example: `bash("python skills/csv-to-json/scripts/convert.py uploads/data.csv outputs/result.json")`
            """)
        )

        # Python script for the skill
        (script_dir / "convert.py").write_text(
            textwrap.dedent("""
                import csv
                import json
                import sys
                if len(sys.argv) != 3:
                    print(f"Usage: python {sys.argv[0]} <input_csv> <output_json>")
                    sys.exit(1)
                input_path, output_path = sys.argv[1], sys.argv[2]
                try:
                    data = []
                    with open(input_path, 'r', encoding='utf-8') as f:
                        reader = csv.DictReader(f)
                        for row in reader:
                            data.append(row)
                    with open(output_path, 'w', encoding='utf-8') as f:
                        json.dump(data, f, indent=2)
                    print(f"Successfully converted {input_path} to {output_path}")
                except FileNotFoundError:
                    print(f"Error: Input file not found at {input_path}")
                    sys.exit(1)
            """)
        )

        # 4. Create a symlink from the session to the skills root
        (session_dir / "skills").symlink_to(skills_root_dir, target_is_directory=True)

        # 5. Yield the session directory path to the test
        yield session_dir

    finally:
        # 6. Explicitly clean up the entire temporary directory
        shutil.rmtree(top_level_dir)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@pytest.mark.asyncio
async def test_skill_core_logic(skill_test_env: Path):
    """
    Tests the core logic of the 'csv-to-json' skill by directly
    calling the centralized tool functions.
    """
    session_dir = skill_test_env

    # Stage an input file exactly as a user upload would appear.
    (session_dir / "uploads" / "data.csv").write_text("id,name\n1,Alice\n2,Bob\n")

    # Run the skill's documented command through the shared executor,
    # just as an agent would.
    command = "python skills/csv-to-json/scripts/convert.py uploads/data.csv outputs/result.json"
    result = await execute_command(command, working_dir=session_dir)
    assert "Successfully converted" in result

    # read_file_content prefixes every line with "NNNNNN|"; strip that
    # prefix before parsing the JSON payload.
    numbered = read_file_content(session_dir / "outputs" / "result.json")
    payload = "\n".join(row.split("|", 1)[1] for row in numbered.splitlines())

    assert json.loads(payload) == [{"id": "1", "name": "Alice"}, {"id": "2", "name": "Bob"}]
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def test_skill_discovery_and_loading(skill_test_env: Path):
    """
    Tests the core logic of discovering a skill and loading its instructions.
    """
    # The fixture puts skills in a sibling directory of the session dir.
    skills_root_dir = skill_test_env.parent / "skills_root"

    # Discovery must find exactly the one fixture skill with its metadata.
    found = discover_skills(skills_root_dir)
    assert len(found) == 1
    meta = found[0]
    assert meta.name == "csv-to-json"
    assert "Converts a CSV file" in meta.description

    # Loading must return the full SKILL.md, frontmatter included.
    content = load_skill_content(skills_root_dir, "csv-to-json")
    assert "name: csv-to-json" in content
    assert "# CSV to JSON Conversion" in content
    assert 'Example: `bash("python skills/csv-to-json/scripts/convert.py' in content
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: kagent-skills
|
|
3
|
+
Version: 0.7.13
|
|
4
|
+
Summary: Core library for discovering and loading KAgent skills.
|
|
5
|
+
Requires-Python: >=3.13
|
|
6
|
+
Description-Content-Type: text/markdown
|
|
7
|
+
Requires-Dist: pydantic>=2.0.0
|
|
8
|
+
Requires-Dist: pyyaml>=6.0
|
|
9
|
+
Provides-Extra: dev
|
|
10
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
11
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
12
|
+
Requires-Dist: black>=23.0.0; extra == "dev"
|
|
13
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
14
|
+
|
|
15
|
+
# KAgent Skills
|
|
16
|
+
|
|
17
|
+
Core library for discovering, parsing, and loading KAgent skills from the filesystem.
|
|
18
|
+
|
|
19
|
+
For example usage, see `kagent-adk` and `kagent-openai` packages.
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
src/kagent/skills/__init__.py
|
|
4
|
+
src/kagent/skills/discovery.py
|
|
5
|
+
src/kagent/skills/models.py
|
|
6
|
+
src/kagent/skills/prompts.py
|
|
7
|
+
src/kagent/skills/session.py
|
|
8
|
+
src/kagent/skills/shell.py
|
|
9
|
+
src/kagent/tests/unittests/test_skill_execution.py
|
|
10
|
+
src/kagent_skills.egg-info/PKG-INFO
|
|
11
|
+
src/kagent_skills.egg-info/SOURCES.txt
|
|
12
|
+
src/kagent_skills.egg-info/dependency_links.txt
|
|
13
|
+
src/kagent_skills.egg-info/requires.txt
|
|
14
|
+
src/kagent_skills.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
kagent
|