mcpkernel 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcpkernel/__init__.py +27 -0
- mcpkernel/agent_manifest/__init__.py +19 -0
- mcpkernel/agent_manifest/hooks.py +71 -0
- mcpkernel/agent_manifest/loader.py +326 -0
- mcpkernel/agent_manifest/policy_bridge.py +505 -0
- mcpkernel/agent_manifest/tool_validator.py +138 -0
- mcpkernel/audit/__init__.py +11 -0
- mcpkernel/audit/exporter.py +106 -0
- mcpkernel/audit/logger.py +203 -0
- mcpkernel/cli.py +398 -0
- mcpkernel/config.py +272 -0
- mcpkernel/context/__init__.py +14 -0
- mcpkernel/context/dependency_graph.py +102 -0
- mcpkernel/context/pruning.py +56 -0
- mcpkernel/context/reducer.py +142 -0
- mcpkernel/dee/__init__.py +16 -0
- mcpkernel/dee/drift.py +131 -0
- mcpkernel/dee/envelope.py +128 -0
- mcpkernel/dee/replay.py +83 -0
- mcpkernel/dee/snapshot.py +54 -0
- mcpkernel/dee/trace_store.py +157 -0
- mcpkernel/ebpf/__init__.py +10 -0
- mcpkernel/ebpf/probe.py +179 -0
- mcpkernel/ebpf/redirector.py +70 -0
- mcpkernel/observability/__init__.py +14 -0
- mcpkernel/observability/health.py +70 -0
- mcpkernel/observability/metrics.py +91 -0
- mcpkernel/observability/tracing.py +67 -0
- mcpkernel/policy/__init__.py +13 -0
- mcpkernel/policy/engine.py +215 -0
- mcpkernel/policy/loader.py +88 -0
- mcpkernel/proxy/__init__.py +12 -0
- mcpkernel/proxy/auth.py +87 -0
- mcpkernel/proxy/hooks.py +188 -0
- mcpkernel/proxy/interceptor.py +178 -0
- mcpkernel/proxy/rate_limit.py +85 -0
- mcpkernel/proxy/server.py +292 -0
- mcpkernel/proxy/transform.py +40 -0
- mcpkernel/sandbox/__init__.py +41 -0
- mcpkernel/sandbox/base.py +102 -0
- mcpkernel/sandbox/docker_backend.py +150 -0
- mcpkernel/sandbox/firecracker_backend.py +131 -0
- mcpkernel/sandbox/microsandbox_backend.py +101 -0
- mcpkernel/sandbox/wasm_backend.py +93 -0
- mcpkernel/taint/__init__.py +23 -0
- mcpkernel/taint/propagation.py +134 -0
- mcpkernel/taint/report.py +42 -0
- mcpkernel/taint/sinks.py +110 -0
- mcpkernel/taint/sources.py +142 -0
- mcpkernel/taint/static_analysis.py +149 -0
- mcpkernel/taint/tracker.py +121 -0
- mcpkernel/utils.py +188 -0
- mcpkernel-0.1.0.dist-info/METADATA +367 -0
- mcpkernel-0.1.0.dist-info/RECORD +57 -0
- mcpkernel-0.1.0.dist-info/WHEEL +4 -0
- mcpkernel-0.1.0.dist-info/entry_points.txt +2 -0
- mcpkernel-0.1.0.dist-info/licenses/LICENSE +190 -0
mcpkernel/__init__.py
ADDED
|
"""mcpkernel — The mandatory, deterministic MCP/A2A gateway.

Turns every agent tool call into a provably replayable, taint-safe,
policy-enforced execution.
"""

from __future__ import annotations

__version__ = "0.1.0"
# Public API: the package exception hierarchy re-exported from
# mcpkernel.utils, plus the version string.
__all__ = [
    "AuthError",
    "ConfigError",
    "MCPKernelError",
    "PolicyViolation",
    "SandboxError",
    "TaintViolation",
    "__version__",
]

# NOTE(review): import deliberately placed after __version__/__all__ —
# presumably so mcpkernel.utils can read mcpkernel.__version__ without a
# circular-import failure; confirm before reordering to the top of the file.
from mcpkernel.utils import (
    AuthError,
    ConfigError,
    MCPKernelError,
    PolicyViolation,
    SandboxError,
    TaintViolation,
)
"""Agent manifest integration — load agent definitions and enforce them at runtime.

Inspired by the open gitagent specification (MIT-licensed). MCPKernel reads
``agent.yaml`` manifests and converts compliance declarations into runtime
policy rules, tool-schema validation, and proxy hooks.
"""

from __future__ import annotations

# Public API of the subpackage; keep in sync with the re-exports below.
__all__ = [
    "AgentManifestDefinition",
    "ToolSchemaValidator",
    "load_agent_manifest",
    "manifest_to_policy_rules",
]

from mcpkernel.agent_manifest.loader import AgentManifestDefinition, load_agent_manifest
from mcpkernel.agent_manifest.policy_bridge import manifest_to_policy_rules
from mcpkernel.agent_manifest.tool_validator import ToolSchemaValidator
@@ -0,0 +1,71 @@
|
|
|
1
|
+
"""Proxy hook that validates tool calls against an agent manifest definition.
|
|
2
|
+
|
|
3
|
+
When active, this hook checks every MCP tool call against the agent's declared
|
|
4
|
+
tool schemas and annotations (read-only, requires_confirmation).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from typing import TYPE_CHECKING
|
|
10
|
+
|
|
11
|
+
from mcpkernel.agent_manifest.tool_validator import ToolSchemaValidator
|
|
12
|
+
from mcpkernel.proxy.interceptor import InterceptorContext, PluginHook
|
|
13
|
+
from mcpkernel.utils import get_logger
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from mcpkernel.agent_manifest.loader import AgentManifestDefinition
|
|
17
|
+
|
|
18
|
+
logger = get_logger(__name__)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class AgentManifestHook(PluginHook):
    """Pre-execution hook: validate tool calls against agent manifest tool schemas."""

    PRIORITY = 950  # Runs after policy (1000) but before taint (900)
    NAME = "agent_manifest"

    def __init__(self, definition: AgentManifestDefinition) -> None:
        self._definition = definition
        self._validator = ToolSchemaValidator(definition)
        # Accept every declared tool under both its literal name and an
        # underscore-normalised alias (dashes replaced by underscores).
        declared = definition.tools_list
        self._allowed_tools: set[str] = set(declared) | {
            name.replace("-", "_") for name in declared
        }

    async def pre_execution(self, ctx: InterceptorContext) -> None:
        tool_name = ctx.call.tool_name

        # An empty allow-set means no tools were declared, so nothing is blocked.
        if self._allowed_tools and tool_name not in self._allowed_tools:
            ctx.aborted = True
            ctx.abort_reason = (
                f"agent_manifest: tool '{tool_name}' not declared in agent.yaml for agent '{self._definition.name}'"
            )
            logger.warning(
                "agent_manifest hook blocked undeclared tool",
                tool=tool_name,
                agent=self._definition.name,
            )
            return

        # Check the call arguments against the declared input schema, if one exists.
        if self._validator.has_schema(tool_name):
            problems = self._validator.validate(tool_name, ctx.call.arguments)
            if problems:
                ctx.aborted = True
                ctx.abort_reason = f"agent_manifest: schema validation failed for '{tool_name}': " + "; ".join(problems)
                logger.warning(
                    "agent_manifest hook schema validation failed",
                    tool=tool_name,
                    errors=problems,
                )
                return

        # Surface manifest annotations to downstream hooks via ctx.extra.
        if self._validator.requires_confirmation(tool_name):
            ctx.extra["manifest_requires_confirmation"] = True
        if self._validator.is_read_only(tool_name):
            ctx.extra["manifest_read_only"] = True

        ctx.extra["manifest_agent"] = self._definition.name
        ctx.extra["manifest_version"] = self._definition.version
@@ -0,0 +1,326 @@
|
|
|
1
|
+
"""Load and parse agent manifest definitions from a repository directory.
|
|
2
|
+
|
|
3
|
+
Reads ``agent.yaml``, optional ``SOUL.md``/``RULES.md``, tool schemas from
|
|
4
|
+
``tools/*.yaml``, hooks from ``hooks/hooks.yaml``, skills directories, and the
|
|
5
|
+
compliance section to produce a structured ``AgentManifestDefinition``.
|
|
6
|
+
|
|
7
|
+
Inspired by the open gitagent specification (MIT-licensed).
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
from dataclasses import dataclass, field
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
import yaml
|
|
17
|
+
|
|
18
|
+
from mcpkernel.utils import ConfigError, get_logger
|
|
19
|
+
|
|
20
|
+
logger = get_logger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class ToolSchema:
    """Parsed tool definition from ``tools/<name>.yaml``."""

    # Tool identifier; the loader falls back to the YAML file stem when absent.
    name: str
    description: str = ""
    version: str = ""
    # Raw schema mappings as read from YAML; their structure is not enforced here.
    input_schema: dict[str, Any] = field(default_factory=dict)
    output_schema: dict[str, Any] = field(default_factory=dict)
    # Free-form flags (e.g. read-only / requires-confirmation) consumed by hooks.
    annotations: dict[str, Any] = field(default_factory=dict)
@dataclass
class ComplianceConfig:
    """Parsed ``compliance`` section of ``agent.yaml``."""

    # Overall risk classification; the loader defaults this to "low".
    risk_tier: str = "low"
    # Names of compliance frameworks the agent declares.
    frameworks: list[str] = field(default_factory=list)
    # The sub-sections below are kept as raw mappings; their schemas are not
    # validated here — downstream policy code interprets them.
    supervision: dict[str, Any] = field(default_factory=dict)
    recordkeeping: dict[str, Any] = field(default_factory=dict)
    model_risk: dict[str, Any] = field(default_factory=dict)
    data_governance: dict[str, Any] = field(default_factory=dict)
    communications: dict[str, Any] = field(default_factory=dict)
    segregation_of_duties: dict[str, Any] = field(default_factory=dict)
@dataclass
class HookDefinition:
    """Parsed hook entry from ``hooks/hooks.yaml``."""

    # Event name the hook is registered for.
    event: str
    # Script to run when the event fires (path/command semantics defined by the runner).
    script: str
    # Timeout in seconds; defaults to 30 when hooks.yaml omits it.
    timeout: int = 30
@dataclass
class SkillInfo:
    """Minimal info about a discovered skill directory."""

    # Directory name under skills/.
    name: str
    # Absolute/relative path to the skill directory itself.
    path: Path
    # True when the directory contains a SKILL.md file.
    has_skill_md: bool = False
@dataclass
class SubAgentRef:
    """Reference to a sub-agent declared in ``agents`` section."""

    name: str
    role: str = ""
    # All remaining keys of the agents entry, minus "name" and "role".
    config: dict[str, Any] = field(default_factory=dict)
@dataclass
class AgentManifestDefinition:
    """Complete parsed agent manifest definition."""

    # Core identity — name/version/description are required by the loader.
    name: str
    version: str
    description: str = ""
    spec_version: str = ""
    author: str = ""
    license: str = ""
    model: dict[str, Any] = field(default_factory=dict)
    # Tool names from the ``tools`` key of agent.yaml.
    skills: list[str] = field(default_factory=list)
    tools_list: list[str] = field(default_factory=list)
    # Parsed tools/*.yaml definitions.
    tool_schemas: list[ToolSchema] = field(default_factory=list)
    runtime: dict[str, Any] = field(default_factory=dict)
    # None when agent.yaml has no compliance section.
    compliance: ComplianceConfig | None = None

    # Extended fields
    # Stripped contents of SOUL.md / RULES.md, "" when absent.
    soul_md: str = ""
    rules_md: str = ""
    # Entries from hooks/hooks.yaml.
    hooks: list[HookDefinition] = field(default_factory=list)
    # Discovered skills/ subdirectories.
    skill_infos: list[SkillInfo] = field(default_factory=list)
    # Parsed ``agents`` section.
    sub_agents: list[SubAgentRef] = field(default_factory=list)
    a2a: dict[str, Any] = field(default_factory=dict)
    dependencies: list[dict[str, Any]] = field(default_factory=list)
    delegation: dict[str, Any] = field(default_factory=dict)
    tags: list[str] = field(default_factory=list)
    metadata: dict[str, Any] = field(default_factory=dict)

    # Raw parsed agent.yaml mapping and the repository it was loaded from.
    raw: dict[str, Any] = field(default_factory=dict)
    repo_path: Path | None = None
def load_agent_manifest(repo_path: str | Path) -> AgentManifestDefinition:
    """Load an agent manifest definition from a repository directory.

    Parameters
    ----------
    repo_path:
        Path to the root of a repository containing ``agent.yaml``.

    Returns
    -------
    AgentManifestDefinition
        The parsed agent definition with all resolved metadata.

    Raises
    ------
    ConfigError
        If ``agent.yaml`` is missing or invalid (including YAML parse errors).
    """
    repo_path = Path(repo_path)
    manifest_path = repo_path / "agent.yaml"

    if not manifest_path.exists():
        raise ConfigError(f"No agent.yaml found in {repo_path}")

    try:
        with open(manifest_path, encoding="utf-8") as f:
            raw = yaml.safe_load(f)
    except yaml.YAMLError as exc:
        # Surface parse failures as the package's ConfigError (the documented
        # contract) instead of leaking a raw YAML traceback to callers.
        raise ConfigError(f"agent.yaml is not valid YAML: {manifest_path}") from exc

    if not isinstance(raw, dict):
        raise ConfigError(f"agent.yaml must be a YAML mapping: {manifest_path}")

    # Required fields
    name = raw.get("name")
    version = raw.get("version")
    description = raw.get("description")
    if not name or not version or not description:
        raise ConfigError("agent.yaml missing required fields: name, version, description")

    # Parse compliance section — ignore a null/non-mapping value (e.g. a bare
    # ``compliance:`` key) rather than crashing inside _parse_compliance.
    compliance = None
    if isinstance(raw.get("compliance"), dict):
        compliance = _parse_compliance(raw["compliance"])

    # Load tool schemas from tools/*.yaml
    tool_schemas = _load_tool_schemas(repo_path / "tools")

    # Load optional markdown files
    soul_md = _load_markdown(repo_path / "SOUL.md")
    rules_md = _load_markdown(repo_path / "RULES.md")

    # Load hooks
    hooks = _load_hooks(repo_path / "hooks" / "hooks.yaml")

    # Discover skills
    skill_infos = _discover_skills(repo_path / "skills")

    # Parse sub-agents
    sub_agents = _parse_sub_agents(raw.get("agents", []))

    # A ``key: null`` entry in YAML makes .get(key, default) return None, not
    # the default; fall back to the empty container so downstream code can
    # always iterate these fields safely.
    definition = AgentManifestDefinition(
        name=name,
        version=version,
        description=description,
        spec_version=raw.get("spec_version") or "",
        author=raw.get("author") or "",
        license=raw.get("license") or "",
        model=raw.get("model") or {},
        skills=raw.get("skills") or [],
        tools_list=raw.get("tools") or [],
        tool_schemas=tool_schemas,
        runtime=raw.get("runtime") or {},
        compliance=compliance,
        soul_md=soul_md,
        rules_md=rules_md,
        hooks=hooks,
        skill_infos=skill_infos,
        sub_agents=sub_agents,
        a2a=raw.get("a2a") or {},
        dependencies=raw.get("dependencies") or [],
        delegation=raw.get("delegation") or {},
        tags=raw.get("tags") or [],
        metadata=raw.get("metadata") or {},
        raw=raw,
        repo_path=repo_path,
    )

    logger.info(
        "agent manifest loaded",
        agent=name,
        version=version,
        tools=len(tool_schemas),
        hooks=len(hooks),
        skills=len(skill_infos),
        sub_agents=len(sub_agents),
        has_compliance=compliance is not None,
        has_soul=bool(soul_md),
    )
    return definition
def _parse_compliance(data: dict[str, Any]) -> ComplianceConfig:
    """Parse the compliance section into a structured config.

    A null or non-mapping value (e.g. a bare ``compliance:`` key in YAML)
    yields the all-defaults config instead of raising ``AttributeError``
    on ``.get``.
    """
    if not isinstance(data, dict):
        data = {}
    return ComplianceConfig(
        risk_tier=data.get("risk_tier", "low"),
        frameworks=data.get("frameworks", []),
        supervision=data.get("supervision", {}),
        recordkeeping=data.get("recordkeeping", {}),
        model_risk=data.get("model_risk", {}),
        data_governance=data.get("data_governance", {}),
        communications=data.get("communications", {}),
        segregation_of_duties=data.get("segregation_of_duties", {}),
    )
def _load_tool_schemas(tools_dir: Path) -> list[ToolSchema]:
    """Load all tool YAML definitions from the tools/ directory.

    Files that are unreadable, unparsable, or not a YAML mapping are skipped
    with a warning rather than aborting manifest loading.
    """
    if not tools_dir.is_dir():
        return []

    schemas: list[ToolSchema] = []
    for p in sorted(tools_dir.glob("*.y*ml")):
        # The glob pattern is loose (it would match e.g. ``.yXml``); keep only
        # real YAML suffixes.
        if p.suffix not in (".yaml", ".yml"):
            continue
        try:
            with open(p, encoding="utf-8") as f:
                raw = yaml.safe_load(f)
            if not isinstance(raw, dict):
                logger.warning("skipping non-mapping tool file", path=str(p))
                continue
            schemas.append(
                ToolSchema(
                    name=raw.get("name", p.stem),
                    description=raw.get("description", ""),
                    version=raw.get("version", ""),
                    input_schema=raw.get("input_schema", {}),
                    output_schema=raw.get("output_schema", {}),
                    annotations=raw.get("annotations", {}),
                )
            )
        # OSError added: an unreadable file should be skipped like a bad parse,
        # not crash the whole manifest load.
        except (OSError, yaml.YAMLError):
            logger.warning("failed to parse tool schema", path=str(p))

    return schemas
def _load_markdown(path: Path) -> str:
    """Return the stripped text of *path*, or the empty string when it is not a file."""
    return path.read_text(encoding="utf-8").strip() if path.is_file() else ""
def _load_hooks(hooks_path: Path) -> list[HookDefinition]:
    """Load hooks from ``hooks/hooks.yaml``.

    Returns an empty list when the file is absent, unreadable, unparsable,
    or does not contain a ``hooks`` list; malformed entries are skipped.
    """
    if not hooks_path.is_file():
        return []

    try:
        with open(hooks_path, encoding="utf-8") as f:
            raw = yaml.safe_load(f)
    # OSError added: an unreadable file is treated like a parse failure.
    except (OSError, yaml.YAMLError):
        logger.warning("failed to parse hooks.yaml", path=str(hooks_path))
        return []

    if not isinstance(raw, dict):
        return []

    # A bare ``hooks:`` key yields None from .get (not the default), and a
    # scalar value is not iterable as entries — require an actual list.
    entries = raw.get("hooks")
    if not isinstance(entries, list):
        return []

    hooks: list[HookDefinition] = []
    for entry in entries:
        if not isinstance(entry, dict):
            continue
        event = entry.get("event", "")
        script = entry.get("script", "")
        if event and script:
            hooks.append(
                HookDefinition(
                    event=event,
                    script=script,
                    timeout=entry.get("timeout", 30),
                )
            )
    return hooks
def _discover_skills(skills_dir: Path) -> list[SkillInfo]:
    """Discover skill directories under ``skills/``.

    Each immediate subdirectory becomes one SkillInfo; plain files are ignored.
    """
    if not skills_dir.is_dir():
        return []

    return [
        SkillInfo(
            name=entry.name,
            path=entry,
            has_skill_md=(entry / "SKILL.md").is_file(),
        )
        for entry in sorted(skills_dir.iterdir())
        if entry.is_dir()
    ]
def _parse_sub_agents(agents_data: list[Any]) -> list[SubAgentRef]:
    """Parse the ``agents`` section into sub-agent references.

    Accepts either bare string names or mappings with at least a ``name`` key;
    anything else (including a non-list section) is silently ignored.
    """
    refs: list[SubAgentRef] = []
    if not isinstance(agents_data, list):
        return refs

    for item in agents_data:
        if isinstance(item, str):
            refs.append(SubAgentRef(name=item))
        elif isinstance(item, dict) and "name" in item:
            # Everything except name/role is preserved as opaque config.
            extras = {key: val for key, val in item.items() if key not in ("name", "role")}
            refs.append(
                SubAgentRef(
                    name=item["name"],
                    role=item.get("role", ""),
                    config=extras,
                )
            )
    return refs