skilllite 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,507 @@
1
+ """
2
+ SkillManager - Main interface for managing and executing skills.
3
+
4
+ This module uses OpenAI-compatible API format as the unified interface,
5
+ which is supported by most LLM providers including:
6
+ - OpenAI (GPT-4, GPT-3.5, etc.)
7
+ - Anthropic Claude (via OpenAI-compatible endpoint or native)
8
+ - Azure OpenAI
9
+ - Google Gemini (via OpenAI-compatible endpoint)
10
+ - Local models (Ollama, vLLM, LMStudio, etc.)
11
+ - DeepSeek, Qwen, Moonshot, Zhipu, and other providers
12
+
13
+ Usage with different providers:
14
+
15
+ # OpenAI
16
+ from openai import OpenAI
17
+ client = OpenAI()
18
+
19
+ # Azure OpenAI
20
+ from openai import AzureOpenAI
21
+ client = AzureOpenAI(azure_endpoint="...", api_key="...")
22
+
23
+ # Ollama (local)
24
+ from openai import OpenAI
25
+ client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")
26
+
27
+ # DeepSeek
28
+ from openai import OpenAI
29
+ client = OpenAI(base_url="https://api.deepseek.com/v1", api_key="...")
30
+
31
+ # Qwen (Alibaba Cloud)
32
+ from openai import OpenAI
33
+ client = OpenAI(base_url="https://dashscope.aliyuncs.com/compatible-mode/v1", api_key="...")
34
+
35
+ # Moonshot (Kimi)
36
+ from openai import OpenAI
37
+ client = OpenAI(base_url="https://api.moonshot.cn/v1", api_key="...")
38
+ """
39
+
40
+ from pathlib import Path
41
+ from typing import Any, Callable, Dict, List, Optional, Union
42
+
43
+ from .executor import ExecutionResult, SkillExecutor
44
+ from .loops import AgenticLoop, AgenticLoopClaudeNative, ApiFormat
45
+ from .registry import SkillRegistry
46
+ from .tool_builder import ToolBuilder
47
+ from .prompt_builder import PromptBuilder
48
+ from .handler import ToolCallHandler
49
+ from .skill_info import SkillInfo
50
+ from .tools import ToolDefinition, ToolResult, ToolUseRequest
51
+
52
+
53
class SkillManager:
    """
    Main interface for managing and executing skills.

    This is a facade class that composes:
    - SkillRegistry: Skill registration and discovery
    - ToolBuilder: Tool definition generation and schema inference
    - PromptBuilder: System prompt context generation
    - ToolCallHandler: LLM response handling and tool execution

    Example:
        ```python
        from openai import OpenAI
        from skilllite import SkillManager

        # Works with any OpenAI-compatible client
        client = OpenAI()  # or OpenAI(base_url="...", api_key="...")
        manager = SkillManager(skills_dir="./my_skills")

        # Get tools in OpenAI format (universal)
        tools = manager.get_tools()

        # Call any OpenAI-compatible API
        response = client.chat.completions.create(
            model="gpt-4",
            tools=tools,
            messages=[{"role": "user", "content": "..."}]
        )

        # Handle tool calls
        if response.choices[0].message.tool_calls:
            results = manager.handle_tool_calls(response)
        ```
    """

    def __init__(
        self,
        skills_dir: Optional[Union[str, Path]] = None,
        binary_path: Optional[str] = None,
        cache_dir: Optional[str] = None,
        allow_network: bool = False,
        enable_sandbox: bool = True,
        execution_timeout: Optional[int] = None,
        max_memory_mb: Optional[int] = None,
        sandbox_level: Optional[str] = None
    ):
        """
        Initialize the SkillManager.

        Args:
            skills_dir: Directory containing skills. If None, no skills are loaded initially.
            binary_path: Path to the skillbox binary. If None, searches PATH.
            cache_dir: Directory for caching virtual environments.
            allow_network: Whether to allow network access by default.
            enable_sandbox: Whether to enable sandbox protection (default: True).
            execution_timeout: Skill execution timeout in seconds (default: 120).
            max_memory_mb: Maximum memory limit in MB (default: 512).
            sandbox_level: Sandbox security level (1/2/3, default from env or 3).
        """
        # Executor runs skill scripts (sandboxing, timeouts, memory limits).
        self._executor = SkillExecutor(
            binary_path=binary_path,
            cache_dir=cache_dir,
            allow_network=allow_network,
            enable_sandbox=enable_sandbox,
            execution_timeout=execution_timeout,
            max_memory_mb=max_memory_mb,
            sandbox_level=sandbox_level
        )

        # Registry holds all discovered skills.
        self._registry = SkillRegistry()

        # Collaborators share the registry; the handler also needs the executor.
        self._tool_builder = ToolBuilder(self._registry)
        self._prompt_builder = PromptBuilder(self._registry)
        self._handler = ToolCallHandler(self._registry, self._executor)

        # Eagerly scan the skills directory if one was provided.
        if skills_dir:
            self.scan_directory(Path(skills_dir))

    # ==================== Skill Registration (delegated to registry) ====================

    def scan_directory(self, directory: Path) -> int:
        """Scan a directory for skills. Returns the count reported by the registry."""
        return self._registry.scan_directory(directory)

    def register_skill(self, skill_dir: Path) -> SkillInfo:
        """Register a single skill from a directory."""
        return self._registry.register_skill(skill_dir)

    def get_skill(self, name: str) -> Optional[SkillInfo]:
        """Get a skill by name, or None if not registered."""
        return self._registry.get_skill(name)

    def list_skills(self) -> List[SkillInfo]:
        """Get all registered skills."""
        return self._registry.list_skills()

    def skill_names(self) -> List[str]:
        """Get names of all registered skills."""
        return self._registry.skill_names()

    def has_skill(self, name: str) -> bool:
        """Check if a skill exists."""
        return self._registry.has_skill(name)

    def is_executable(self, name: str) -> bool:
        """Check if a skill or tool is executable."""
        return self._registry.is_executable(name)

    def list_executable_skills(self) -> List[SkillInfo]:
        """Get all executable skills."""
        return self._registry.list_executable_skills()

    def list_prompt_only_skills(self) -> List[SkillInfo]:
        """Get all prompt-only skills."""
        return self._registry.list_prompt_only_skills()

    def list_multi_script_tools(self) -> List[str]:
        """Get all multi-script tool names."""
        return self._registry.list_multi_script_tools()

    # ==================== Tool Definition (delegated to tool_builder) ====================

    def get_tool_definitions(self, include_prompt_only: bool = False) -> List[ToolDefinition]:
        """Get tool definitions for registered skills."""
        return self._tool_builder.get_tool_definitions(include_prompt_only)

    def get_tools(self) -> List[Dict[str, Any]]:
        """Get tool definitions in OpenAI-compatible format."""
        return self._tool_builder.get_tools_openai()

    def get_tools_for_claude_native(self) -> List[Dict[str, Any]]:
        """Get tool definitions in Claude's native API format."""
        return self._tool_builder.get_tools_claude_native()

    def infer_all_schemas(self, force: bool = False) -> Dict[str, Dict[str, Any]]:
        """Infer schemas for all skills that don't have one defined."""
        return self._tool_builder.infer_all_schemas(force)

    # ==================== System Prompt (delegated to prompt_builder) ====================

    def get_system_prompt_context(
        self,
        include_full_instructions: bool = True,
        include_references: bool = False,
        include_assets: bool = False,
        skills: Optional[List[str]] = None,
        mode: str = "full",
        max_tokens_per_skill: Optional[int] = None
    ) -> str:
        """Generate system prompt context containing skill information."""
        return self._prompt_builder.get_system_prompt_context(
            include_full_instructions=include_full_instructions,
            include_references=include_references,
            include_assets=include_assets,
            skills=skills,
            mode=mode,
            max_tokens_per_skill=max_tokens_per_skill
        )

    def get_skill_details(self, skill_name: str) -> Optional[str]:
        """Get full details for a specific skill."""
        return self._prompt_builder.get_skill_details(skill_name)

    def get_skills_summary(self) -> str:
        """Get a compact summary of all available skills."""
        return self._prompt_builder.get_skills_summary()

    def estimate_context_tokens(
        self,
        mode: str = "full",
        include_references: bool = False,
        include_assets: bool = False
    ) -> int:
        """Estimate the number of tokens the system prompt context will use."""
        return self._prompt_builder.estimate_context_tokens(mode, include_references, include_assets)

    def get_skill_context(self, skill_name: str) -> Optional[Dict[str, Any]]:
        """Get complete context for a specific skill."""
        return self._prompt_builder.get_skill_context(skill_name)

    def get_all_skill_contexts(self) -> Dict[str, Dict[str, Any]]:
        """Get complete context for all skills."""
        return self._prompt_builder.get_all_skill_contexts()

    # ==================== Skills Status (delegated to prompt_builder) ====================

    def get_skills_status(self) -> Dict[str, Any]:
        """Get structured status information about all loaded skills."""
        return self._prompt_builder.get_skills_status()

    def print_skills_status(self, verbose: bool = False) -> None:
        """Print a formatted status of all loaded skills."""
        self._prompt_builder.print_skills_status(verbose)

    def get_prompt_only_status(self) -> List[Dict[str, str]]:
        """Get status info for prompt-only skills."""
        return self._prompt_builder.get_prompt_only_status()

    def print_prompt_only_status(self) -> None:
        """Print status of prompt-only skills."""
        self._prompt_builder.print_prompt_only_status()

    # ==================== Skill Execution (delegated to handler) ====================

    def execute(
        self,
        skill_name: str,
        input_data: Dict[str, Any],
        allow_network: Optional[bool] = None,
        timeout: Optional[int] = None
    ) -> ExecutionResult:
        """Execute a skill or multi-script tool with the given input."""
        return self._handler.execute(skill_name, input_data, allow_network, timeout)

    def execute_tool_call(
        self,
        request: ToolUseRequest,
        allow_network: Optional[bool] = None,
        timeout: Optional[int] = None
    ) -> ToolResult:
        """Execute a tool call request from an LLM."""
        return self._handler.execute_tool_call(request, allow_network, timeout)

    # ==================== LLM Response Handling (delegated to handler) ====================

    def parse_tool_calls(self, response: Any) -> List[ToolUseRequest]:
        """Parse tool calls from an OpenAI-compatible LLM response."""
        return self._handler.parse_tool_calls(response)

    def parse_tool_calls_claude_native(self, response: Any) -> List[ToolUseRequest]:
        """Parse tool calls from Claude's native API response."""
        return self._handler.parse_tool_calls_claude_native(response)

    def handle_tool_calls(
        self,
        response: Any,
        allow_network: Optional[bool] = None,
        timeout: Optional[int] = None
    ) -> List[ToolResult]:
        """Parse and execute all tool calls from an OpenAI-compatible LLM response."""
        return self._handler.handle_tool_calls(response, allow_network, timeout)

    def handle_tool_calls_claude_native(
        self,
        response: Any,
        allow_network: Optional[bool] = None,
        timeout: Optional[int] = None
    ) -> List[ToolResult]:
        """Parse and execute all tool calls from Claude's native API response."""
        return self._handler.handle_tool_calls_claude_native(response, allow_network, timeout)

    def format_tool_results_claude_native(self, results: List[ToolResult]) -> List[Dict[str, Any]]:
        """Format tool results for Claude's native API."""
        return self._handler.format_tool_results_claude_native(results)

    # ==================== Enhanced Workflow (delegated to handler) ====================

    def create_enhanced_skill_workflow(
        self,
        skill_name: str,
        user_request: str,
        llm_client: Any,
        llm_model: str = "gpt-4"
    ) -> Dict[str, Any]:
        """Create an enhanced workflow for a skill that involves planning and execution."""
        return self._handler.create_enhanced_skill_workflow(
            skill_name, user_request, llm_client, llm_model
        )

    # ==================== Agentic Loop Creation ====================

    def create_agentic_loop(
        self,
        client: Any,
        model: str,
        system_prompt: Optional[str] = None,
        max_iterations: int = 10,
        api_format: str = "openai",
        custom_tool_handler: Optional[Callable] = None,
        enable_task_planning: bool = True,
        verbose: bool = True,
        **kwargs
    ) -> AgenticLoop:
        """
        Create a unified agentic loop for LLM-tool interactions.

        Supports both OpenAI-compatible APIs and Claude's native API.

        Args:
            client: LLM client (OpenAI or Anthropic)
            model: Model name to use
            system_prompt: Optional system prompt
            max_iterations: Maximum number of iterations
            api_format: API format - "openai" or "claude_native". Any value other
                than "claude_native" (including typos) falls back to OpenAI format.
            custom_tool_handler: Optional custom tool handler
            enable_task_planning: Whether to generate task list before execution
            verbose: Whether to print detailed logs
            **kwargs: Additional arguments passed to the LLM

        Returns:
            AgenticLoop instance

        Example:
            # OpenAI-compatible (default)
            loop = manager.create_agentic_loop(client, "gpt-4")

            # Claude native API
            loop = manager.create_agentic_loop(client, "claude-3-opus",
                                               api_format="claude_native")
        """
        # Only the exact string "claude_native" selects Claude's format;
        # everything else is treated as OpenAI-compatible.
        format_enum = ApiFormat.CLAUDE_NATIVE if api_format == "claude_native" else ApiFormat.OPENAI
        return AgenticLoop(
            manager=self,
            client=client,
            model=model,
            system_prompt=system_prompt,
            max_iterations=max_iterations,
            api_format=format_enum,
            custom_tool_handler=custom_tool_handler,
            enable_task_planning=enable_task_planning,
            verbose=verbose,
            **kwargs
        )

    def create_agentic_loop_claude_native(
        self,
        client: Any,
        model: str,
        system_prompt: Optional[str] = None,
        max_iterations: int = 10,
        **kwargs
    ) -> AgenticLoop:
        """
        Create an agentic loop for Claude's native API.

        This is a convenience method that calls create_agentic_loop with
        api_format="claude_native".

        Args:
            client: Anthropic client
            model: Model name to use
            system_prompt: Optional system prompt
            max_iterations: Maximum number of iterations
            **kwargs: Additional arguments passed to the LLM

        Returns:
            AgenticLoop instance configured for Claude native API
        """
        return self.create_agentic_loop(
            client=client,
            model=model,
            system_prompt=system_prompt,
            max_iterations=max_iterations,
            api_format="claude_native",
            **kwargs
        )

    def create_enhanced_agentic_loop(
        self,
        client: Any,
        model: str,
        system_prompt: Optional[str] = None,
        max_iterations: int = 10,
        custom_tools: Optional[List[Dict[str, Any]]] = None,
        custom_tool_executor: Optional[Callable] = None,
        enable_task_planning: bool = True,
        verbose: bool = True,
        **kwargs
    ) -> AgenticLoop:
        """
        Create an enhanced agentic loop with custom tools support.

        This method creates an AgenticLoop that can handle both skill tools
        and custom tools (like file operations).

        Args:
            client: LLM client (OpenAI-compatible)
            model: Model name to use
            system_prompt: Optional system prompt
            max_iterations: Maximum number of iterations
            custom_tools: Additional custom tool definitions.
                NOTE(review): this argument is currently NOT forwarded to the
                AgenticLoop — callers must also expose these definitions to the
                LLM themselves (e.g. merge them with get_tools()). TODO: confirm
                whether AgenticLoop accepts extra tool definitions and forward.
            custom_tool_executor: Executor function for custom tools. It is
                called with a dict of {"tool_name": <name>, **tool_args} and its
                return value is wrapped in a successful ToolResult.
            enable_task_planning: Whether to generate task list before execution
            verbose: Whether to print detailed logs
            **kwargs: Additional arguments passed to the LLM

        Returns:
            AgenticLoop instance with enhanced capabilities
        """
        # Combined handler: routes skill-tool calls to the internal handler and
        # everything else to the caller-supplied executor.
        # (ToolUseRequest/ToolResult come from the module-level imports; the
        # previous function-local re-import was redundant and has been removed.)
        def combined_tool_handler(response, manager, allow_network, timeout):
            requests = ToolUseRequest.parse_from_openai_response(response)
            results: List[ToolResult] = []

            # Names that must be executed as skills (including multi-script tools).
            skill_tool_names = set(self.skill_names())
            skill_tool_names.update(self._registry.list_multi_script_tools())

            for request in requests:
                if request.name in skill_tool_names:
                    # Execute as skill tool.
                    result = self._handler.execute_tool_call(
                        request, allow_network=allow_network, timeout=timeout
                    )
                    results.append(result)
                elif custom_tool_executor:
                    # Execute as custom tool; any exception becomes an error result.
                    try:
                        tool_input = {"tool_name": request.name, **request.input}
                        output = custom_tool_executor(tool_input)
                        results.append(ToolResult.success(request.id, output))
                    except Exception as e:
                        results.append(ToolResult.error(request.id, str(e)))
                else:
                    results.append(ToolResult.error(
                        request.id, f"No executor found for tool: {request.name}"
                    ))

            return results

        return AgenticLoop(
            manager=self,
            client=client,
            model=model,
            system_prompt=system_prompt,
            max_iterations=max_iterations,
            api_format=ApiFormat.OPENAI,
            # Without a custom executor there is nothing extra to route, so the
            # loop's default skill-only handling is used.
            custom_tool_handler=combined_tool_handler if custom_tool_executor else None,
            enable_task_planning=enable_task_planning,
            verbose=verbose,
            **kwargs
        )

    # ==================== Compatibility Properties ====================

    @property
    def _skills(self) -> Dict[str, SkillInfo]:
        """Direct access to skills dict (for backward compatibility)."""
        return self._registry.skills

    @property
    def _multi_script_tools(self) -> Dict[str, Dict[str, str]]:
        """Direct access to multi-script tools dict (for backward compatibility)."""
        return self._registry.multi_script_tools

    @property
    def _inferred_schemas(self) -> Dict[str, Dict[str, Any]]:
        """Direct access to inferred schemas (for backward compatibility)."""
        return self._tool_builder.inferred_schemas