skilllite 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- skilllite/__init__.py +159 -0
- skilllite/analyzer.py +391 -0
- skilllite/builtin_tools.py +240 -0
- skilllite/cli.py +217 -0
- skilllite/core/__init__.py +65 -0
- skilllite/core/executor.py +182 -0
- skilllite/core/handler.py +332 -0
- skilllite/core/loops.py +770 -0
- skilllite/core/manager.py +507 -0
- skilllite/core/metadata.py +338 -0
- skilllite/core/prompt_builder.py +321 -0
- skilllite/core/registry.py +185 -0
- skilllite/core/skill_info.py +181 -0
- skilllite/core/tool_builder.py +338 -0
- skilllite/core/tools.py +253 -0
- skilllite/mcp/__init__.py +45 -0
- skilllite/mcp/server.py +734 -0
- skilllite/quick.py +420 -0
- skilllite/sandbox/__init__.py +36 -0
- skilllite/sandbox/base.py +93 -0
- skilllite/sandbox/config.py +229 -0
- skilllite/sandbox/skillbox/__init__.py +44 -0
- skilllite/sandbox/skillbox/binary.py +421 -0
- skilllite/sandbox/skillbox/executor.py +608 -0
- skilllite/sandbox/utils.py +77 -0
- skilllite/validation.py +137 -0
- skilllite-0.1.0.dist-info/METADATA +293 -0
- skilllite-0.1.0.dist-info/RECORD +32 -0
- skilllite-0.1.0.dist-info/WHEEL +5 -0
- skilllite-0.1.0.dist-info/entry_points.txt +3 -0
- skilllite-0.1.0.dist-info/licenses/LICENSE +21 -0
- skilllite-0.1.0.dist-info/top_level.txt +1 -0
skilllite/core/tools.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tool definitions and protocol adapters for Claude/OpenAI integration.
|
|
3
|
+
|
|
4
|
+
This is a CORE module - do not modify without explicit permission.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from enum import Enum
|
|
9
|
+
from typing import Any, Dict, List, Optional
|
|
10
|
+
import json
|
|
11
|
+
|
|
12
|
+
class ToolFormat(Enum):
    """Identifiers for the LLM tool-calling wire formats supported here."""

    # Anthropic's native tools API.
    CLAUDE = "claude"
    # OpenAI-style function calling (also spoken by many compatible providers).
    OPENAI = "openai"
|
|
16
|
+
|
|
17
|
+
@dataclass
class ToolDefinition:
    """A tool description ready to be serialized for an LLM API request."""

    # Unique name the model will reference in its tool_use / tool_calls.
    name: str
    # Human-readable explanation of what the tool does, shown to the model.
    description: str
    # JSON Schema describing the tool's expected input object.
    input_schema: Dict[str, Any]

    def to_claude_format(self) -> Dict[str, Any]:
        """Render as an entry for the Anthropic (Claude) `tools` parameter."""
        return {
            "name": self.name,
            "description": self.description,
            "input_schema": self.input_schema,
        }

    def to_openai_format(self) -> Dict[str, Any]:
        """Render as an OpenAI function-calling `tools` entry."""
        function_spec = {
            "name": self.name,
            "description": self.description,
            "parameters": self.input_schema,
        }
        return {"type": "function", "function": function_spec}

    def to_format(self, format: ToolFormat) -> Dict[str, Any]:
        """Render in the requested wire format.

        Raises:
            ValueError: if `format` is not a supported ToolFormat.
        """
        if format is ToolFormat.CLAUDE:
            return self.to_claude_format()
        if format is ToolFormat.OPENAI:
            return self.to_openai_format()
        raise ValueError(f"Unsupported format: {format}")
|
|
51
|
+
|
|
52
|
+
@dataclass
class ToolUseRequest:
    """A tool invocation request parsed out of an LLM response."""

    # Provider-assigned call id (echoed back in the matching tool result).
    id: str
    # Name of the tool the model wants to invoke.
    name: str
    # Arguments for the tool, already decoded into a dict.
    input: Dict[str, Any]

    @classmethod
    def from_claude_response(cls, content: Dict[str, Any]) -> Optional["ToolUseRequest"]:
        """Parse a single Claude content block (dict form).

        Returns None for any block whose type is not "tool_use".
        """
        if content.get("type") != "tool_use":
            return None

        return cls(
            id=content.get("id", ""),
            name=content.get("name", ""),
            input=content.get("input", {}),
        )

    @classmethod
    def from_openai_response(cls, tool_call: Dict[str, Any]) -> Optional["ToolUseRequest"]:
        """Parse a single OpenAI `tool_calls` entry (dict form)."""
        function = tool_call.get("function", {})
        arguments_str = function.get("arguments", "{}")

        try:
            arguments = json.loads(arguments_str)
        except json.JSONDecodeError:
            # Model emitted malformed JSON arguments: degrade to an empty
            # input rather than failing the whole response parse.
            arguments = {}

        return cls(
            id=tool_call.get("id", ""),
            name=function.get("name", ""),
            input=arguments,
        )

    @classmethod
    def parse_from_response(cls, response: Any, format: "ToolFormat") -> List["ToolUseRequest"]:
        """
        Parse tool use requests from an LLM response.

        Args:
            response: The raw response from the LLM API
            format: The format of the response (Claude or OpenAI)

        Returns:
            List of ToolUseRequest objects

        Raises:
            ValueError: if `format` is not a supported ToolFormat.
        """
        if format == ToolFormat.CLAUDE:
            return cls.parse_from_claude_response(response)
        elif format == ToolFormat.OPENAI:
            return cls.parse_from_openai_response(response)
        else:
            raise ValueError(f"Unsupported format: {format}")

    @classmethod
    def parse_from_openai_response(cls, response: Any) -> List["ToolUseRequest"]:
        """
        Parse tool use requests from an OpenAI-compatible LLM response.

        Works with any OpenAI-compatible provider (OpenAI, Azure OpenAI,
        Ollama, vLLM, LMStudio, DeepSeek, Qwen, Moonshot, etc.).

        Args:
            response: The response from any OpenAI-compatible API; may be an
                SDK object with `.choices`, a plain dict, or a bare message.

        Returns:
            List of ToolUseRequest objects (empty when there are no tool calls
            or the response carries no choices).
        """
        requests = []

        # Handle both SDK-object and dict responses. Guard against an empty
        # `choices` list, which previously raised IndexError.
        if hasattr(response, 'choices'):
            choices = response.choices
            message = choices[0].message if choices else {}
        elif isinstance(response, dict) and 'choices' in response:
            choices = response['choices']
            message = choices[0].get('message', {}) if choices else {}
        else:
            message = response

        # Extract tool_calls regardless of representation.
        if hasattr(message, 'tool_calls'):
            tool_calls = message.tool_calls or []
        elif isinstance(message, dict):
            tool_calls = message.get('tool_calls', []) or []
        else:
            tool_calls = []

        for tool_call in tool_calls:
            if isinstance(tool_call, dict):
                req = cls.from_openai_response(tool_call)
            else:
                # Object-style tool_call (SDK types). Fetch `.function` once:
                # the original accessed `tool_call.function` a second time
                # outside its try block, so a missing attribute could raise.
                function = getattr(tool_call, 'function', None)
                try:
                    arguments = getattr(function, 'arguments', None) or "{}"
                    if isinstance(arguments, str):
                        arguments = json.loads(arguments)
                except json.JSONDecodeError:
                    arguments = {}

                req = cls(
                    id=getattr(tool_call, 'id', ''),
                    name=getattr(function, 'name', ''),
                    input=arguments,
                )
            if req:
                requests.append(req)

        return requests

    @classmethod
    def parse_from_claude_response(cls, response: Any) -> List["ToolUseRequest"]:
        """
        Parse tool use requests from Claude's native API response.

        Use this only if you're using the Anthropic SDK directly
        (not via OpenAI-compatible endpoint).

        Args:
            response: The response from Claude's native API; may be an SDK
                object with `.content` or a plain dict.

        Returns:
            List of ToolUseRequest objects
        """
        requests = []

        # Content blocks may live on an SDK object or in a plain dict.
        if hasattr(response, 'content'):
            content = response.content
        elif isinstance(response, dict):
            content = response.get('content', [])
        else:
            content = []

        for block in content or []:
            if isinstance(block, dict):
                req = cls.from_claude_response(block)
            elif getattr(block, 'type', None) == 'tool_use':
                # Guard `.input` with getattr: the original dereferenced
                # block.input unconditionally and could raise AttributeError.
                raw_input = getattr(block, 'input', None)
                req = cls(
                    id=getattr(block, 'id', ''),
                    name=getattr(block, 'name', ''),
                    input=raw_input if isinstance(raw_input, dict) else {},
                )
            else:
                continue

            if req:
                requests.append(req)

        return requests
|
|
209
|
+
|
|
210
|
+
@dataclass
class ToolResult:
    """Outcome of a tool execution, formatted for return to the LLM."""

    # Id of the tool_use / tool_call this result answers.
    tool_use_id: str
    # Result payload; always a string (non-strings are JSON-encoded).
    content: str
    # True when the execution failed and `content` carries the error text.
    is_error: bool = False

    @classmethod
    def success(cls, tool_use_id: str, content: Any) -> "ToolResult":
        """Build a non-error result, JSON-encoding non-string payloads."""
        payload = content if isinstance(content, str) else json.dumps(content)
        return cls(tool_use_id=tool_use_id, content=payload, is_error=False)

    @classmethod
    def error(cls, tool_use_id: str, error: str) -> "ToolResult":
        """Build an error result carrying the error message."""
        return cls(tool_use_id=tool_use_id, content=error, is_error=True)

    def to_claude_format(self) -> Dict[str, Any]:
        """Render as a Claude `tool_result` content block."""
        return {
            "type": "tool_result",
            "tool_use_id": self.tool_use_id,
            "content": self.content,
            "is_error": self.is_error,
        }

    def to_openai_format(self) -> Dict[str, Any]:
        """Render as an OpenAI `tool`-role message."""
        return {
            "role": "tool",
            "tool_call_id": self.tool_use_id,
            "content": self.content,
        }

    def to_format(self, format: ToolFormat) -> Dict[str, Any]:
        """Render in the requested wire format.

        Raises:
            ValueError: if `format` is not a supported ToolFormat.
        """
        if format is ToolFormat.CLAUDE:
            return self.to_claude_format()
        if format is ToolFormat.OPENAI:
            return self.to_openai_format()
        raise ValueError(f"Unsupported format: {format}")
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
"""
|
|
2
|
+
MCP (Model Context Protocol) Integration for SkillLite.
|
|
3
|
+
|
|
4
|
+
This module provides MCP server functionality for SkillLite, allowing
|
|
5
|
+
you to use SkillLite as an MCP tool server.
|
|
6
|
+
|
|
7
|
+
Example:
|
|
8
|
+
```python
|
|
9
|
+
from skilllite.mcp import MCPServer
|
|
10
|
+
|
|
11
|
+
# Start MCP server
|
|
12
|
+
server = MCPServer()
|
|
13
|
+
await server.run()
|
|
14
|
+
```
|
|
15
|
+
|
|
16
|
+
Or via CLI:
|
|
17
|
+
```bash
|
|
18
|
+
skilllite mcp server
|
|
19
|
+
```
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
# Probe for the optional `mcp` dependency; server features are only usable
# when it is installed.
MCP_AVAILABLE = True
try:
    from mcp.server import Server  # noqa: F401  (import probe only)
except ImportError:
    MCP_AVAILABLE = False
|
|
28
|
+
|
|
29
|
+
# PEP 562 module-level __getattr__: defer importing .server until a public
# name is first accessed, avoiding module-loading issues when running with -m.
def __getattr__(name):
    """Lazily resolve the package's public attributes."""
    if name == "MCP_AVAILABLE":
        # Normally satisfied by the module global above; kept as a fallback.
        return MCP_AVAILABLE
    if name == "MCPServer":
        from .server import MCPServer
        return MCPServer
    if name == "SandboxExecutor":
        from .server import SandboxExecutor
        return SandboxExecutor
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
|
40
|
+
|
|
41
|
+
# Public API of skilllite.mcp; MCPServer and SandboxExecutor are resolved
# lazily via the module-level __getattr__ above.
__all__ = ["MCPServer", "SandboxExecutor", "MCP_AVAILABLE"]
|