skilllite 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,332 @@
1
+ """
2
+ Handler - LLM response handling and tool call execution.
3
+
4
+ This module handles:
5
+ - Parsing tool calls from LLM responses
6
+ - Executing tool calls
7
+ - Formatting tool results
8
+ """
9
+
10
+ import json
11
+ from typing import Any, Dict, List, Optional, TYPE_CHECKING
12
+
13
+ from .executor import ExecutionResult, SkillExecutor
14
+ from .tools import ToolResult, ToolUseRequest
15
+
16
+ if TYPE_CHECKING:
17
+ from .registry import SkillRegistry
18
+
19
+
20
class ToolCallHandler:
    """
    Executes tool calls issued by an LLM.

    Bridges LLM responses and skill execution: tool-use requests are
    parsed out of a response and dispatched to the skill executor,
    with skill metadata looked up through the registry.
    """

    def __init__(
        self,
        registry: "SkillRegistry",
        executor: SkillExecutor
    ):
        """
        Create a handler bound to a registry and an executor.

        Args:
            registry: Skill registry used to look up skill metadata
            executor: Executor that actually runs the skills
        """
        self._executor = executor
        self._registry = registry
42
+
43
+ # ==================== Skill Execution ====================
44
+
45
+ def execute(
46
+ self,
47
+ skill_name: str,
48
+ input_data: Dict[str, Any],
49
+ allow_network: Optional[bool] = None,
50
+ timeout: Optional[int] = None
51
+ ) -> ExecutionResult:
52
+ """
53
+ Execute a skill or multi-script tool with the given input.
54
+
55
+ Args:
56
+ skill_name: Name of the skill or multi-script tool
57
+ (e.g., "calculator" or "skill-creator__init-skill")
58
+ input_data: Input data for the skill
59
+ allow_network: Whether to allow network access
60
+ timeout: Execution timeout in seconds
61
+
62
+ Returns:
63
+ ExecutionResult with output or error
64
+ """
65
+ # Check if it's a multi-script tool
66
+ tool_info = self._registry.get_multi_script_tool_info(skill_name)
67
+ if tool_info:
68
+ parent_skill = self._registry.get_skill(tool_info["skill_name"])
69
+ if not parent_skill:
70
+ return ExecutionResult(
71
+ success=False,
72
+ error=f"Parent skill not found: {tool_info['skill_name']}"
73
+ )
74
+
75
+ # Execute with the specific script as entry point
76
+ return self._executor.execute(
77
+ skill_dir=parent_skill.path,
78
+ input_data=input_data,
79
+ allow_network=allow_network,
80
+ timeout=timeout,
81
+ entry_point=tool_info["script_path"]
82
+ )
83
+
84
+ # Regular skill execution
85
+ info = self._registry.get_skill(skill_name)
86
+ if not info:
87
+ return ExecutionResult(
88
+ success=False,
89
+ error=f"Skill not found: {skill_name}"
90
+ )
91
+
92
+ return self._executor.execute(
93
+ skill_dir=info.path,
94
+ input_data=input_data,
95
+ allow_network=allow_network,
96
+ timeout=timeout
97
+ )
98
+
99
+ def execute_tool_call(
100
+ self,
101
+ request: ToolUseRequest,
102
+ allow_network: Optional[bool] = None,
103
+ timeout: Optional[int] = None
104
+ ) -> ToolResult:
105
+ """
106
+ Execute a tool call request from an LLM.
107
+
108
+ Args:
109
+ request: Tool use request from LLM
110
+ allow_network: Whether to allow network access
111
+ timeout: Execution timeout in seconds
112
+
113
+ Returns:
114
+ ToolResult with success or error
115
+ """
116
+ result = self.execute(
117
+ skill_name=request.name,
118
+ input_data=request.input,
119
+ allow_network=allow_network,
120
+ timeout=timeout
121
+ )
122
+
123
+ if result.success:
124
+ return ToolResult.success(
125
+ tool_use_id=request.id,
126
+ content=result.output
127
+ )
128
+ else:
129
+ return ToolResult.error(
130
+ tool_use_id=request.id,
131
+ error=result.error or "Unknown error"
132
+ )
133
+
134
+ # ==================== LLM Response Parsing ====================
135
+
136
+ def parse_tool_calls(self, response: Any) -> List[ToolUseRequest]:
137
+ """
138
+ Parse tool calls from an OpenAI-compatible LLM response.
139
+
140
+ Args:
141
+ response: Response from OpenAI-compatible API
142
+
143
+ Returns:
144
+ List of ToolUseRequest objects
145
+ """
146
+ return ToolUseRequest.parse_from_openai_response(response)
147
+
148
+ def parse_tool_calls_claude_native(self, response: Any) -> List[ToolUseRequest]:
149
+ """
150
+ Parse tool calls from Claude's native API response.
151
+
152
+ Args:
153
+ response: Response from Claude's native API
154
+
155
+ Returns:
156
+ List of ToolUseRequest objects
157
+ """
158
+ return ToolUseRequest.parse_from_claude_response(response)
159
+
160
+ # ==================== Batch Handling ====================
161
+
162
+ def handle_tool_calls(
163
+ self,
164
+ response: Any,
165
+ allow_network: Optional[bool] = None,
166
+ timeout: Optional[int] = None
167
+ ) -> List[ToolResult]:
168
+ """
169
+ Parse and execute all tool calls from an OpenAI-compatible LLM response.
170
+
171
+ Args:
172
+ response: Response from OpenAI-compatible API
173
+ allow_network: Whether to allow network access
174
+ timeout: Execution timeout in seconds
175
+
176
+ Returns:
177
+ List of ToolResult objects
178
+ """
179
+ requests = self.parse_tool_calls(response)
180
+ results = []
181
+ for request in requests:
182
+ result = self.execute_tool_call(
183
+ request,
184
+ allow_network=allow_network,
185
+ timeout=timeout
186
+ )
187
+ results.append(result)
188
+ return results
189
+
190
+ def handle_tool_calls_claude_native(
191
+ self,
192
+ response: Any,
193
+ allow_network: Optional[bool] = None,
194
+ timeout: Optional[int] = None
195
+ ) -> List[ToolResult]:
196
+ """
197
+ Parse and execute all tool calls from Claude's native API response.
198
+
199
+ Args:
200
+ response: Response from Claude's native API
201
+ allow_network: Whether to allow network access
202
+ timeout: Execution timeout in seconds
203
+
204
+ Returns:
205
+ List of ToolResult objects
206
+ """
207
+ requests = self.parse_tool_calls_claude_native(response)
208
+ results = []
209
+ for request in requests:
210
+ result = self.execute_tool_call(
211
+ request,
212
+ allow_network=allow_network,
213
+ timeout=timeout
214
+ )
215
+ results.append(result)
216
+ return results
217
+
218
+ def format_tool_results_claude_native(
219
+ self,
220
+ results: List[ToolResult]
221
+ ) -> List[Dict[str, Any]]:
222
+ """
223
+ Format tool results for Claude's native API.
224
+
225
+ Args:
226
+ results: List of ToolResult objects
227
+
228
+ Returns:
229
+ List of formatted tool result dicts
230
+ """
231
+ return [r.to_claude_format() for r in results]
232
+
233
+ # ==================== Enhanced Workflow ====================
234
+
235
+ def create_enhanced_skill_workflow(
236
+ self,
237
+ skill_name: str,
238
+ user_request: str,
239
+ llm_client: Any,
240
+ llm_model: str = "gpt-4"
241
+ ) -> Dict[str, Any]:
242
+ """
243
+ Create an enhanced workflow for a skill that involves planning and execution.
244
+
245
+ Implements a two-stage process:
246
+ 1. Read skill information and create an execution plan
247
+ 2. Execute the plan step by step
248
+
249
+ Args:
250
+ skill_name: Name of the skill to use
251
+ user_request: User's request or requirements
252
+ llm_client: OpenAI-compatible client for LLM interaction
253
+ llm_model: Model name to use for planning
254
+
255
+ Returns:
256
+ Dictionary with execution results and status
257
+ """
258
+ skill_info = self._registry.get_skill(skill_name)
259
+ if not skill_info:
260
+ return {"success": False, "error": f"Skill '{skill_name}' not found"}
261
+
262
+ skill_context = skill_info.get_context(
263
+ include_references=True,
264
+ include_assets=True
265
+ )
266
+ if not skill_context:
267
+ return {
268
+ "success": False,
269
+ "error": f"Failed to get context for skill '{skill_name}'"
270
+ }
271
+
272
+ plan_prompt = f"""
273
+ You are tasked with creating an execution plan for the following skill:
274
+
275
+ Skill Name: {skill_name}
276
+ Skill Description: {skill_info.description or 'No description available'}
277
+
278
+ Skill Content:
279
+ {skill_context.get('full_instructions', 'No instructions available')}
280
+
281
+ User Request: {user_request}
282
+
283
+ Please create a detailed execution plan that includes:
284
+ 1. Analysis of what needs to be done based on the user request
285
+ 2. Steps to accomplish the task using this skill
286
+ 3. Expected outcomes
287
+ 4. Any potential challenges or considerations
288
+
289
+ Respond in JSON format with the following structure:
290
+ {{
291
+ "analysis": "Brief analysis of the request and skill compatibility",
292
+ "steps": [
293
+ {{
294
+ "step_number": 1,
295
+ "description": "What to do in this step",
296
+ "expected_input": "What input is needed",
297
+ "expected_output": "What output is expected"
298
+ }}
299
+ ],
300
+ "considerations": ["Any important points to consider"]
301
+ }}
302
+ """
303
+
304
+ try:
305
+ plan_response = llm_client.chat.completions.create(
306
+ model=llm_model,
307
+ messages=[{"role": "user", "content": plan_prompt}],
308
+ response_format={"type": "json_object"}
309
+ )
310
+
311
+ plan = json.loads(plan_response.choices[0].message.content)
312
+
313
+ execution_results = []
314
+ for step in plan.get("steps", []):
315
+ execution_results.append({
316
+ "step": step["step_number"],
317
+ "status": "planned",
318
+ "description": step["description"]
319
+ })
320
+
321
+ return {
322
+ "success": True,
323
+ "skill_name": skill_name,
324
+ "plan": plan,
325
+ "execution_results": execution_results
326
+ }
327
+
328
+ except Exception as e:
329
+ return {
330
+ "success": False,
331
+ "error": f"Failed to create execution plan: {str(e)}"
332
+ }