lollmsbot-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lollmsbot/skills.py ADDED
@@ -0,0 +1,1483 @@
1
+ """
2
+ Skills Module - LollmsBot's Learned Capability System
3
+
4
+ Skills are reusable, composable workflows that encode "how to do things" - from
5
+ simple tasks like "send a formatted email" to complex orchestrations like
6
+ "research, write, and publish a blog post."
7
+
8
+ Key characteristics:
9
+ - Self-documenting: Each skill explains what it does, when to use it, what it needs
10
+ - Dependency-aware: Skills declare what other skills/tools they require
11
+ - Versioned: Skills evolve, with history and rollback capability
12
+ - Composable: Skills can call other skills, building complex workflows
13
+ - Learnable: LollmsBot can create new skills from demonstration or description
14
+ - Auditable: All skill executions are logged for review
15
+
16
+ Architecture:
17
+ - Skill: Atomic unit of capability
18
+ - SkillRegistry: Manages skill discovery, loading, versioning
19
+ - SkillExecutor: Runs skills with proper dependency injection and logging
20
+ - SkillLearner: Creates new skills from examples or specifications
21
+ """
22
+
23
+ from __future__ import annotations
24
+
25
+ import ast
+ import asyncio
26
+ import hashlib
27
+ import importlib.util
28
+ import inspect
29
+ import json
30
+ import logging
31
+ import re
32
+ import sys
33
+ import textwrap
34
+ import time
35
+ from dataclasses import dataclass, field, asdict
36
+ from datetime import datetime
37
+ from enum import Enum, auto
38
+ from pathlib import Path
39
+ from typing import Any, Callable, Coroutine, Dict, List, Optional, Set, Tuple, Type, Union
40
+
41
+ from lollmsbot.agent import Agent, Tool, ToolResult
42
+ from lollmsbot.guardian import get_guardian, GuardianAction
43
+
44
+
45
+ logger = logging.getLogger("lollmsbot.skills")
46
+
47
+
48
+ class SkillComplexity(Enum):
49
+ """Complexity classification for skills."""
50
+ TRIVIAL = auto() # Single operation, no decisions (e.g., "format date")
51
+ SIMPLE = auto() # Linear sequence, no branching (e.g., "send email")
52
+ MODERATE = auto() # Conditional logic, error handling (e.g., "process invoice")
53
+ COMPLEX = auto() # Multi-step orchestration, state management (e.g., "onboard employee")
54
+ SYSTEM = auto() # Meta-skills that create/modify other skills
55
+
56
+
57
+ @dataclass
58
+ class SkillParameter:
59
+ """Definition of a skill's input parameter."""
60
+ name: str
61
+ type: str # JSON Schema type: string, number, boolean, array, object
62
+ description: str
63
+ required: bool = True
64
+ default: Any = None
65
+ examples: List[Any] = field(default_factory=list)
66
+ validation_regex: Optional[str] = None # Pattern for string validation
67
+
68
+ def to_schema(self) -> Dict[str, Any]:
69
+ """Convert to JSON Schema property."""
70
+ schema = {
71
+ "type": self.type,
72
+ "description": self.description,
73
+ }
74
+ if self.default is not None:
75
+ schema["default"] = self.default
76
+ if self.examples:
77
+ schema["examples"] = self.examples
78
+ if self.validation_regex:
79
+ schema["pattern"] = self.validation_regex
80
+ return schema
81
+
82
+
83
+ @dataclass
84
+ class SkillOutput:
85
+ """Definition of a skill's output."""
86
+ name: str
87
+ type: str
88
+ description: str
89
+ always_present: bool = True
90
+
91
+ def to_schema(self) -> Dict[str, Any]:
92
+ return {
93
+ "type": self.type,
94
+ "description": self.description,
95
+ }
96
+
97
+
98
+ @dataclass
99
+ class SkillDependency:
100
+ """Declaration of what this skill needs to function."""
101
+ kind: str # "tool", "skill", "api", "file", "env"
102
+ name: str
103
+ version_constraint: Optional[str] = None # e.g., ">=1.0.0", "filesystem>=2.0"
104
+ optional: bool = False
105
+ reason: str = "" # Why is this needed?
106
+
107
+ def is_satisfied(self, available_tools: Set[str], available_skills: Set[str]) -> bool:
108
+ """Check if this dependency is currently available."""
109
+ if self.kind == "tool":
110
+ return self.name in available_tools
111
+ elif self.kind == "skill":
112
+ return self.name in available_skills
113
+ # Other kinds require runtime checks
114
+ return True
115
+
116
+
117
+ @dataclass
118
+ class SkillExample:
119
+ """Demonstration of how to use the skill."""
120
+ description: str
121
+ input_params: Dict[str, Any]
122
+ expected_output: Dict[str, Any]
123
+ notes: str = "" # Special considerations, edge cases
124
+
125
+
126
+ @dataclass
127
+ class SkillMetadata:
128
+ """Comprehensive metadata about a skill."""
129
+ name: str
130
+ version: str = "1.0.0"
131
+ description: str = ""
132
+ long_description: str = ""
133
+ author: str = "lollmsbot"
134
+ created_at: datetime = field(default_factory=datetime.now)
135
+ modified_at: datetime = field(default_factory=datetime.now)
136
+ complexity: SkillComplexity = SkillComplexity.SIMPLE
137
+ tags: List[str] = field(default_factory=list)
138
+ categories: List[str] = field(default_factory=list)
139
+
140
+ # Documentation
141
+ when_to_use: str = "" # Guidance on appropriate usage
142
+ when_not_to_use: str = "" # Anti-patterns, alternatives
143
+ prerequisites: List[str] = field(default_factory=list) # Human knowledge needed
144
+ estimated_duration: Optional[str] = None # "30 seconds", "2-5 minutes"
145
+ error_handling: str = "basic" # "minimal", "basic", "robust", "comprehensive"
146
+
147
+ # Technical
148
+ parameters: List[SkillParameter] = field(default_factory=list)
149
+ outputs: List[SkillOutput] = field(default_factory=list)
150
+ dependencies: List[SkillDependency] = field(default_factory=list)
151
+ examples: List[SkillExample] = field(default_factory=list)
152
+
153
+ # Provenance
154
+ parent_skill: Optional[str] = None # If this was derived from another
155
+ learning_method: Optional[str] = None # "demonstration", "description", "abstraction", "composition"
156
+ confidence_score: float = 1.0 # 0-1, how sure we are this works
157
+
158
+ # Runtime stats
159
+ execution_count: int = 0
160
+ success_rate: float = 0.0
161
+ last_executed: Optional[datetime] = None
162
+
163
+ def to_dict(self) -> Dict[str, Any]:
164
+ """Serialize to dictionary."""
165
+ data = asdict(self)
166
+ # Convert enums
167
+ data["complexity"] = self.complexity.name
168
+ data["created_at"] = self.created_at.isoformat()
169
+ data["modified_at"] = self.modified_at.isoformat()
170
+ data["last_executed"] = self.last_executed.isoformat() if self.last_executed else None
171
+ # Convert nested dataclasses
172
+ data["parameters"] = [asdict(p) for p in self.parameters]
173
+ data["outputs"] = [asdict(o) for o in self.outputs]
174
+ data["dependencies"] = [asdict(d) for d in self.dependencies]
175
+ data["examples"] = [asdict(e) for e in self.examples]
176
+ return data
177
+
178
+ @classmethod
179
+ def from_dict(cls, data: Dict[str, Any]) -> "SkillMetadata":
180
+ """Deserialize from dictionary."""
181
+ # Reconstruct enums
182
+ complexity = SkillComplexity[data.get("complexity", "SIMPLE")]
183
+ # Reconstruct datetimes
184
+ created_at = datetime.fromisoformat(data.get("created_at")) if data.get("created_at") else datetime.now()
185
+ modified_at = datetime.fromisoformat(data.get("modified_at")) if data.get("modified_at") else datetime.now()
186
+ last_executed = datetime.fromisoformat(data["last_executed"]) if data.get("last_executed") else None
187
+ # Reconstruct nested classes
188
+ parameters = [SkillParameter(**p) for p in data.get("parameters", [])]
189
+ outputs = [SkillOutput(**o) for o in data.get("outputs", [])]
190
+ dependencies = [SkillDependency(**d) for d in data.get("dependencies", [])]
191
+ examples = [SkillExample(**e) for e in data.get("examples", [])]
192
+
193
+ return cls(
194
+ name=data["name"],
195
+ version=data.get("version", "1.0.0"),
196
+ description=data.get("description", ""),
197
+ long_description=data.get("long_description", ""),
198
+ author=data.get("author", "lollmsbot"),
199
+ created_at=created_at,
200
+ modified_at=modified_at,
201
+ complexity=complexity,
202
+ tags=data.get("tags", []),
203
+ categories=data.get("categories", []),
204
+ when_to_use=data.get("when_to_use", ""),
205
+ when_not_to_use=data.get("when_not_to_use", ""),
206
+ prerequisites=data.get("prerequisites", []),
207
+ estimated_duration=data.get("estimated_duration"),
208
+ error_handling=data.get("error_handling", "basic"),
209
+ parameters=parameters,
210
+ outputs=outputs,
211
+ dependencies=dependencies,
212
+ examples=examples,
213
+ parent_skill=data.get("parent_skill"),
214
+ learning_method=data.get("learning_method"),
215
+ confidence_score=data.get("confidence_score", 1.0),
216
+ execution_count=data.get("execution_count", 0),
217
+ success_rate=data.get("success_rate", 0.0),
218
+ last_executed=last_executed,
219
+ )
220
+
221
+
222
+ @dataclass
223
+ class SkillExecutionRecord:
224
+ """Log entry for a single skill execution."""
225
+ execution_id: str
226
+ skill_name: str
227
+ skill_version: str
228
+ started_at: datetime
229
+ completed_at: Optional[datetime] = None
230
+ input_params: Dict[str, Any] = field(default_factory=dict, repr=False) # Sensitive data
231
+ output_data: Dict[str, Any] = field(default_factory=dict)
232
+ success: bool = False
233
+ error_message: Optional[str] = None
234
+ steps_executed: List[str] = field(default_factory=list)
235
+ tools_used: List[str] = field(default_factory=list)
236
+ skills_called: List[str] = field(default_factory=list)
237
+ duration_seconds: float = 0.0
238
+ guardian_events: List[str] = field(default_factory=list) # Security review flags
239
+
240
+ def to_dict(self) -> Dict[str, Any]:
241
+ return {
242
+ "execution_id": self.execution_id,
243
+ "skill_name": self.skill_name,
244
+ "skill_version": self.skill_version,
245
+ "started_at": self.started_at.isoformat(),
246
+ "completed_at": self.completed_at.isoformat() if self.completed_at else None,
247
+ "success": self.success,
248
+ "error_message": self.error_message,
249
+ "steps_executed": self.steps_executed,
250
+ "tools_used": self.tools_used,
251
+ "skills_called": self.skills_called,
252
+ "duration_seconds": self.duration_seconds,
253
+ "guardian_events": self.guardian_events,
254
+ }
255
+
256
+
257
+ class Skill:
258
+ """
259
+ A reusable, documented, versioned capability.
260
+
261
+ Skills can be:
262
+ - Code-based: Python functions with decorators
263
+ - Template-based: Jinja2 templates with parameter substitution
264
+ - LLM-based: Natural language descriptions executed by LLM reasoning
265
+ - Composite: Orchestrations of other skills
266
+ """
267
+
268
+ def __init__(
269
+ self,
270
+ metadata: SkillMetadata,
271
+ implementation: Union[Callable, str, Dict[str, Any]],
272
+ implementation_type: str = "code", # "code", "template", "llm", "composite"
273
+ ):
274
+ self.metadata = metadata
275
+ self.implementation = implementation
276
+ self.implementation_type = implementation_type
277
+
278
+ # Runtime state
279
+ self._compiled_code: Optional[Any] = None
280
+ self._template: Optional[Any] = None
281
+
282
+ @property
283
+ def name(self) -> str:
284
+ return self.metadata.name
285
+
286
+ def validate_inputs(self, inputs: Dict[str, Any]) -> Tuple[bool, List[str]]:
287
+ """Validate provided inputs against parameter schema."""
288
+ errors = []
289
+
290
+ for param in self.metadata.parameters:
291
+ if param.name not in inputs:
292
+ if param.required:
293
+ errors.append(f"Missing required parameter: {param.name}")
294
+ continue
295
+
296
+ value = inputs[param.name]
297
+
298
+ # Type checking
299
+ if param.type == "string" and not isinstance(value, str):
300
+ errors.append(f"{param.name}: expected string, got {type(value).__name__}")
301
+ elif param.type == "number" and not isinstance(value, (int, float)):
302
+ errors.append(f"{param.name}: expected number, got {type(value).__name__}")
303
+ elif param.type == "boolean" and not isinstance(value, bool):
304
+ errors.append(f"{param.name}: expected boolean, got {type(value).__name__}")
305
+ elif param.type == "array" and not isinstance(value, list):
306
+ errors.append(f"{param.name}: expected array, got {type(value).__name__}")
307
+ elif param.type == "object" and not isinstance(value, dict):
308
+ errors.append(f"{param.name}: expected object, got {type(value).__name__}")
309
+
310
+ # Pattern validation
311
+ if param.validation_regex and isinstance(value, str):
312
+ if not re.match(param.validation_regex, value):
313
+ errors.append(f"{param.name}: does not match required pattern")
314
+
315
+ # Check for unknown parameters
316
+ known = {p.name for p in self.metadata.parameters}
317
+ unknown = set(inputs.keys()) - known
318
+ if unknown:
319
+ errors.append(f"Unknown parameters: {', '.join(unknown)}")
320
+
321
+ return len(errors) == 0, errors
322
+
323
+ def check_dependencies(self, available_tools: Set[str], available_skills: Set[str]) -> Tuple[bool, List[str]]:
324
+ """Check if all dependencies are satisfied."""
325
+ missing = []
326
+ for dep in self.metadata.dependencies:
327
+ if not dep.optional and not dep.is_satisfied(available_tools, available_skills):
328
+ missing.append(f"{dep.kind}:{dep.name} ({dep.reason})")
329
+ return len(missing) == 0, missing
330
+
331
+ def to_prompt_description(self) -> str:
332
+ """Generate natural language description for LLM tool selection."""
333
+ lines = [
334
+ f"## Skill: {self.name}",
335
+ f"**Description:** {self.metadata.description}",
336
+ "",
337
+ f"**When to use:** {self.metadata.when_to_use or 'Appropriate when ' + self.metadata.description.lower()}",
338
+ ]
339
+
340
+ if self.metadata.when_not_to_use:
341
+ lines.append(f"**When NOT to use:** {self.metadata.when_not_to_use}")
342
+
343
+ lines.extend([
344
+ "",
345
+ "**Parameters:**",
346
+ ])
347
+ for param in self.metadata.parameters:
348
+ req = " (required)" if param.required else " (optional)"
349
+ default = f", default: {param.default}" if param.default is not None else ""
350
+ lines.append(f"- `{param.name}` ({param.type}){req}: {param.description}{default}")
351
+
352
+ if self.metadata.examples:
353
+ lines.extend(["", "**Example:**"])
354
+ ex = self.metadata.examples[0]
355
+ lines.append(f"Input: {json.dumps(ex.input_params, indent=2)}")
356
+ lines.append(f"Output: {json.dumps(ex.expected_output, indent=2)}")
357
+
358
+ return '\n'.join(lines)
359
+
360
+ def to_dict(self) -> Dict[str, Any]:
361
+ """Serialize complete skill."""
362
+ return {
363
+ "metadata": self.metadata.to_dict(),
364
+ "implementation_type": self.implementation_type,
365
+ "implementation": self._serialize_implementation(),
366
+ }
367
+
368
+ def _serialize_implementation(self) -> Any:
369
+ """Serialize implementation based on type."""
370
+ if self.implementation_type == "code" and callable(self.implementation):
371
+ # Store source code
372
+ try:
373
+ return inspect.getsource(self.implementation)
374
+ except (OSError, TypeError):
375
+ return "# Source not available"
376
+ return self.implementation
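+
+
+ # Illustrative sketch: building a minimal code-based Skill (hypothetical "greet_user" example,
+ # using only names defined in this module).
+ def _example_make_greeting_skill() -> Skill:
+     async def _greet(name: str, agent=None, tools=None, call_skill=None) -> Dict[str, Any]:
+         # Code skills receive their validated inputs plus optionally injected helpers.
+         return {"greeting": f"Hello, {name}!"}
+
+     metadata = SkillMetadata(
+         name="greet_user",
+         description="Produce a short greeting for the given name",
+         complexity=SkillComplexity.TRIVIAL,
+         parameters=[SkillParameter("name", "string", "Name of the person to greet")],
+         outputs=[SkillOutput("greeting", "string", "Rendered greeting text")],
+     )
+     return Skill(metadata, _greet, "code")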
377
+
378
+
379
+ class SkillRegistry:
380
+ """
381
+ Central registry for all skills.
382
+
383
+ Manages skill discovery, loading, versioning, and dependency resolution.
384
+ Provides skill search and recommendation capabilities.
385
+ """
386
+
387
+ DEFAULT_SKILLS_DIR = Path.home() / ".lollmsbot" / "skills"
388
+
389
+ def __init__(self, skills_dir: Optional[Path] = None):
390
+ self.skills_dir = skills_dir or self.DEFAULT_SKILLS_DIR
391
+ self.skills_dir.mkdir(parents=True, exist_ok=True)
392
+
393
+ # Storage
394
+ self._skills: Dict[str, Skill] = {} # name -> latest version
395
+ self._version_history: Dict[str, List[Skill]] = {} # name -> all versions
396
+ self._categories: Dict[str, Set[str]] = {} # category -> skill names
397
+ self._tags: Dict[str, Set[str]] = {} # tag -> skill names
398
+
399
+ # Index for search
400
+ self._search_index: Dict[str, Set[str]] = {} # word -> skill names
401
+
402
+ # Load built-in and user skills
403
+ self._load_built_in_skills()
404
+ self._load_user_skills()
405
+
406
+ def _load_built_in_skills(self) -> None:
407
+ """Register essential built-in skills."""
408
+ built_ins = [
409
+ self._create_file_organizer_skill(),
410
+ self._create_research_synthesizer_skill(),
411
+ self._create_meeting_prep_skill(),
412
+ self._create_code_review_skill(),
413
+ self._create_learning_skill(), # Meta-skill for learning new skills
414
+ ]
415
+ for skill in built_ins:
416
+ self.register(skill, is_builtin=True)
417
+
418
+ def _create_file_organizer_skill(self) -> Skill:
419
+ """Create the file organizer skill."""
420
+ metadata = SkillMetadata(
421
+ name="organize_files",
422
+ version="1.0.0",
423
+ description="Organize files in a directory by date, type, or custom rules",
424
+ long_description="""
425
+ Automatically organizes files in a specified directory using intelligent categorization.
426
+ Can organize by: date (YYYY-MM folders), type (extension-based categories),
427
+ or custom rules (pattern matching).
428
+ """,
429
+ complexity=SkillComplexity.SIMPLE,
430
+ tags=["files", "organization", "automation"],
431
+ categories=["productivity", "file-management"],
432
+ when_to_use="When downloads folder is cluttered, before archiving projects, when setting up new workspace",
433
+ when_not_to_use="For system directories, when files are actively being used by running processes",
434
+ estimated_duration="1-5 minutes depending on file count",
435
+ parameters=[
436
+ SkillParameter("source_dir", "string", "Directory containing files to organize", required=True),
437
+ SkillParameter("method", "string", "Organization method: 'date', 'type', or 'custom'",
438
+ required=True, examples=["date", "type"]),
439
+ SkillParameter("custom_rules", "object", "For 'custom' method: {pattern: destination_folder}",
440
+ required=False, default={}),
441
+ SkillParameter("dry_run", "boolean", "Preview changes without moving files",
442
+ required=False, default=True),
443
+ ],
444
+ outputs=[
445
+ SkillOutput("moved_files", "array", "List of {source, destination} for moved files"),
446
+ SkillOutput("stats", "object", "Summary: {total_files, organized_by_category}"),
447
+ ],
448
+ dependencies=[
449
+ SkillDependency("tool", "filesystem", reason="Needs to move and organize files"),
450
+ ],
451
+ examples=[
452
+ SkillExample(
453
+ description="Organize downloads by file type",
454
+ input_params={
455
+ "source_dir": "~/Downloads",
456
+ "method": "type",
457
+ "dry_run": False,
458
+ },
459
+ expected_output={
460
+ "moved_files": [{"source": "~/Downloads/report.pdf", "destination": "~/Downloads/PDFs/report.pdf"}],
461
+ "stats": {"total_files": 42, "organized_by_category": {"PDFs": 5, "Images": 12, "Archives": 3}},
462
+ },
463
+ ),
464
+ ],
465
+ )
466
+
467
+ # Implementation is a JSON description for LLM-guided execution
468
+ implementation = {
469
+ "execution_plan": [
470
+ {"step": "analyze", "description": "List all files in source_dir and categorize by method"},
471
+ {"step": "preview", "description": "If dry_run, show planned organization without moving"},
472
+ {"step": "organize", "description": "Create category folders and move files"},
473
+ {"step": "verify", "description": "Confirm all moves successful, report stats"},
474
+ ],
475
+ "error_handling": {
476
+ "permission_denied": "Skip file and continue, report at end",
477
+ "filename_collision": "Append number to create unique name",
478
+ "disk_full": "Stop immediately, report partial completion",
479
+ },
480
+ }
481
+
482
+ return Skill(metadata, implementation, "composite")
483
+
484
+ def _create_research_synthesizer_skill(self) -> Skill:
485
+ """Create the research synthesis skill."""
486
+ metadata = SkillMetadata(
487
+ name="synthesize_research",
488
+ version="1.0.0",
489
+ description="Research a topic across multiple sources and synthesize findings",
490
+ long_description="""
491
+ Performs comprehensive research by querying multiple sources (web search,
492
+ knowledge base, documents), then synthesizes findings into structured output
493
+ with citations, confidence levels, and gaps identified.
494
+ """,
495
+ complexity=SkillComplexity.COMPLEX,
496
+ tags=["research", "synthesis", "knowledge", "learning"],
497
+ categories=["research", "knowledge-work"],
498
+ when_to_use="When exploring new topic, preparing reports, validating claims, learning efficiently",
499
+ when_not_to_use="For time-sensitive decisions, when primary sources are required, for legal/medical advice",
500
+ estimated_duration="2-10 minutes depending on breadth",
501
+ parameters=[
502
+ SkillParameter("topic", "string", "Research topic or question", required=True),
503
+ SkillParameter("depth", "string", "Research depth: 'quick', 'standard', 'comprehensive'",
504
+ required=False, default="standard"),
505
+ SkillParameter("sources", "array", "Preferred sources: 'web', 'documents', 'kb'",
506
+ required=False, default=["web", "kb"]),
507
+ SkillParameter("output_format", "string", "Output structure: 'summary', 'report', 'outline', 'qa'",
508
+ required=False, default="summary"),
509
+ ],
510
+ outputs=[
511
+ SkillOutput("synthesis", "object", "Structured findings with main points, evidence, confidence"),
512
+ SkillOutput("sources_used", "array", "List of sources consulted with relevance scores"),
513
+ SkillOutput("gaps", "array", "Questions that couldn't be answered with available sources"),
514
+ SkillOutput("follow_up", "array", "Suggested next research directions"),
515
+ ],
516
+ dependencies=[
517
+ SkillDependency("tool", "http", reason="Web search and API queries"),
518
+ SkillDependency("tool", "filesystem", reason="Reading local documents"),
519
+ SkillDependency("skill", "evaluate_source_credibility", optional=True,
520
+ reason="Better source quality assessment"),
521
+ ],
522
+ examples=[
523
+ SkillExample(
524
+ description="Quick research on Python async patterns",
525
+ input_params={
526
+ "topic": "Python asyncio best practices for web services",
527
+ "depth": "quick",
528
+ "output_format": "summary",
529
+ },
530
+ expected_output={
531
+ "synthesis": {"main_points": ["Use asyncio.create_task for fire-and-forget", ...]},
532
+ "sources_used": [{"url": "...", "relevance": 0.95}],
533
+ "gaps": ["Performance comparison with threading"],
534
+ "follow_up": ["Research asyncio vs trio frameworks"],
535
+ },
536
+ ),
537
+ ],
538
+ )
539
+
540
+ implementation = {
541
+ "execution_plan": [
542
+ {"step": "decompose", "description": "Break topic into sub-questions and search queries"},
543
+ {"step": "search", "description": "Query each source type with appropriate queries"},
544
+ {"step": "evaluate", "description": "Assess source credibility and extract key claims"},
545
+ {"step": "synthesize", "description": "Integrate findings, resolve conflicts, assign confidence"},
546
+ {"step": "structure", "description": "Format according to output_format with citations"},
547
+ ],
548
+ }
549
+
550
+ return Skill(metadata, implementation, "llm")
551
+
552
+ def _create_meeting_prep_skill(self) -> Skill:
553
+ """Create the meeting preparation skill."""
554
+ metadata = SkillMetadata(
555
+ name="prepare_meeting",
556
+ version="1.0.0",
557
+ description="Prepare comprehensive briefing for upcoming meeting",
558
+ complexity=SkillComplexity.MODERATE,
559
+ tags=["meetings", "productivity", "preparation"],
560
+ categories=["productivity", "communication"],
561
+ when_to_use="Before important meetings, when joining new project, when meeting unfamiliar attendees",
562
+ parameters=[
563
+ SkillParameter("meeting_title", "string", "Title or topic of meeting", required=True),
564
+ SkillParameter("attendees", "array", "List of attendee names/roles", required=False, default=[]),
565
+ SkillParameter("my_role", "string", "Your role/perspective in this meeting", required=False, default="participant"),
566
+ SkillParameter("duration_minutes", "number", "Expected meeting duration", required=False, default=30),
567
+ ],
568
+ outputs=[
569
+ SkillOutput("briefing", "object", "Complete meeting preparation package"),
570
+ ],
571
+ dependencies=[
572
+ SkillDependency("tool", "calendar", reason="Check for conflicts, past related meetings"),
573
+ SkillDependency("tool", "http", reason="Research attendees and topics"),
574
+ ],
575
+ )
576
+
577
+ return Skill(metadata, {
578
+ "execution_plan": [
579
+ {"step": "context", "description": "Gather context from calendar, emails, related documents"},
580
+ {"step": "attendees", "description": "Research attendee backgrounds and relationships"},
581
+ {"step": "agenda", "description": "Draft proposed agenda based on likely objectives"},
582
+ {"step": "materials", "description": "Prepare talking points, questions, and reference materials"},
583
+ {"step": "strategy", "description": "Suggest participation strategy based on role and goals"},
584
+ ],
585
+ }, "composite")
586
+
587
+ def _create_code_review_skill(self) -> Skill:
588
+ """Create the code review skill."""
589
+ metadata = SkillMetadata(
590
+ name="review_code",
591
+ version="1.0.0",
592
+ description="Review code for bugs, style, security, and performance",
593
+ complexity=SkillComplexity.MODERATE,
594
+ tags=["code", "review", "quality", "security"],
595
+ categories=["development", "quality-assurance"],
596
+ when_to_use="Before committing, in PR reviews, when learning new codebase, for security audits",
597
+ parameters=[
598
+ SkillParameter("code", "string", "Code to review", required=True),
599
+ SkillParameter("language", "string", "Programming language", required=True),
600
+ SkillParameter("focus_areas", "array", "Aspects to emphasize: 'security', 'performance', 'style', 'bugs'",
601
+ required=False, default=["bugs", "security"]),
602
+ SkillParameter("context", "string", "What this code is supposed to do", required=False, default=""),
603
+ ],
604
+ outputs=[
605
+ SkillOutput("findings", "array", "List of issues with severity, location, explanation, fix"),
606
+ SkillOutput("summary", "object", "Overall assessment: quality_score, confidence, key_concerns"),
607
+ SkillOutput("suggestions", "array", "Improvement opportunities not strictly issues"),
608
+ ],
609
+ )
610
+
611
+ return Skill(metadata, {}, "llm") # LLM-based implementation
612
+
613
+ def _create_learning_skill(self) -> Skill:
614
+ """Create the meta-skill for learning new skills."""
615
+ metadata = SkillMetadata(
616
+ name="learn_skill",
617
+ version="1.0.0",
618
+ description="Create a new skill from description, example, or demonstration",
619
+ long_description="""
620
+ The ultimate meta-skill: learns how to do new things and encodes them
621
+ as reusable, documented, versioned skills. Can learn from:
622
+ - Natural language description of desired behavior
623
+ - Step-by-step demonstration (what I do)
624
+ - Example input/output pairs (what I want)
625
+ - Abstraction of existing skills (compose, specialize, generalize)
626
+ """,
627
+ complexity=SkillComplexity.SYSTEM,
628
+ tags=["meta", "learning", "creation", "evolution"],
629
+ categories=["system", "meta-cognitive"],
630
+ when_to_use="When you need to automate something new, when current skills don't fit, to refine existing skills",
631
+ when_not_to_use="For one-off tasks, when existing skill suffices, for tasks requiring human judgment",
632
+ estimated_duration="2-10 minutes depending on complexity",
633
+ parameters=[
634
+ SkillParameter("method", "string", "Learning method: 'description', 'demonstration', 'example', 'abstraction'",
635
+ required=True),
636
+ SkillParameter("name", "string", "Name for the new skill", required=True),
637
+ SkillParameter("input", "object", "Learning input based on method", required=True),
638
+ SkillParameter("validate", "boolean", "Test skill with examples before saving",
639
+ required=False, default=True),
640
+ ],
641
+ outputs=[
642
+ SkillOutput("skill_created", "object", "Metadata of created skill"),
643
+ SkillOutput("confidence", "number", "Estimated reliability 0-1"),
644
+ SkillOutput("validation_results", "object", "Test results if validate=True"),
645
+ ],
646
+ dependencies=[
647
+ SkillDependency("skill", "validate_skill", reason="Test newly created skills"),
648
+ ],
649
+ )
650
+
651
+ implementation = {
652
+ "learning_strategies": {
653
+ "description": """
654
+ From natural language description:
655
+ 1. Extract intent, parameters, expected behavior
656
+ 2. Identify required tools and dependencies
657
+ 3. Generate implementation plan or code
658
+ 4. Create comprehensive metadata
659
+ """,
660
+ "demonstration": """
661
+ From step-by-step demonstration:
662
+ 1. Record each step and decision point
663
+ 2. Abstract patterns into reusable logic
664
+ 3. Identify parameterizable components
665
+ 4. Document implicit knowledge
666
+ """,
667
+ "example": """
668
+ From input/output examples:
669
+ 1. Infer transformation logic
670
+ 2. Identify edge cases and constraints
671
+ 3. Generate implementation covering examples
672
+ 4. Create additional test cases
673
+ """,
674
+ "abstraction": """
675
+ From existing skills:
676
+ 1. Identify common patterns across skills
677
+ 2. Extract reusable components
678
+ 3. Create parameterized generalization
679
+ 4. Maintain relationship to parent skills
680
+ """,
681
+ },
682
+ }
683
+
684
+ return Skill(metadata, implementation, "llm")
685
+
686
+ def _load_user_skills(self) -> None:
687
+ """Load skills from user skills directory."""
688
+ if not self.skills_dir.exists():
689
+ return
690
+
691
+ for skill_file in self.skills_dir.glob("*.skill.json"):
692
+ try:
693
+ data = json.loads(skill_file.read_text())
694
+ skill = Skill(
695
+ metadata=SkillMetadata.from_dict(data["metadata"]),
696
+ implementation=data.get("implementation"),
697
+ implementation_type=data.get("implementation_type", "code"),
698
+ )
699
+ self.register(skill)
700
+ except Exception as e:
701
+ logger.warning(f"Failed to load skill from {skill_file}: {e}")
702
+
703
+ def register(self, skill: Skill, is_builtin: bool = False) -> None:
704
+ """Register a skill in the registry."""
705
+ name = skill.name
706
+
707
+ # Store in version history
708
+ if name not in self._version_history:
709
+ self._version_history[name] = []
710
+ self._version_history[name].append(skill)
711
+
712
+ # Update latest
713
+ self._skills[name] = skill
714
+
715
+ # Update indexes
716
+ for category in skill.metadata.categories:
717
+ self._categories.setdefault(category, set()).add(name)
718
+
719
+ for tag in skill.metadata.tags:
720
+ self._tags.setdefault(tag, set()).add(name)
721
+
722
+ # Build search index
723
+ text_to_index = f"{name} {skill.metadata.description} {' '.join(skill.metadata.tags)}"
724
+ for word in set(text_to_index.lower().split()):
725
+ self._search_index.setdefault(word, set()).add(name)
726
+
727
+ source = "built-in" if is_builtin else "user"
728
+ logger.debug(f"Registered skill '{name}' v{skill.metadata.version} ({source})")
729
+
730
+ def get(self, name: str, version: Optional[str] = None) -> Optional[Skill]:
731
+ """Get a skill by name, optionally specific version."""
732
+ if version:
733
+ # Search version history
734
+ for skill in self._version_history.get(name, []):
735
+ if skill.metadata.version == version:
736
+ return skill
737
+ return None
738
+ return self._skills.get(name)
739
+
740
+ def list_skills(
741
+ self,
742
+ category: Optional[str] = None,
743
+ tag: Optional[str] = None,
744
+ complexity: Optional[SkillComplexity] = None,
745
+ ) -> List[Skill]:
746
+ """List skills matching criteria."""
747
+ candidates = set(self._skills.keys())
748
+
749
+ if category:
750
+ candidates &= self._categories.get(category, set())
751
+
752
+ if tag:
753
+ candidates &= self._tags.get(tag, set())
754
+
755
+ skills = [self._skills[name] for name in candidates]
756
+
757
+ if complexity:
758
+ skills = [s for s in skills if s.metadata.complexity == complexity]
759
+
760
+ return skills
761
+
762
+ def search(self, query: str) -> List[Tuple[Skill, float]]:
763
+ """Search skills by relevance to query."""
764
+ query_words = set(query.lower().split())
765
+
766
+ # Score by word overlap
767
+ scores: Dict[str, float] = {}
768
+ for word in query_words:
769
+ for skill_name in self._search_index.get(word, set()):
770
+ scores[skill_name] = scores.get(skill_name, 0) + 1
771
+
772
+ # Normalize by description length (shorter = more focused)
773
+ results = []
774
+ for name, score in sorted(scores.items(), key=lambda x: -x[1]):
775
+ skill = self._skills[name]
776
+ # Boost exact name match
777
+ if query.lower() in name.lower():
778
+ score += 2
779
+ # Boost high-confidence skills
780
+ score *= skill.metadata.confidence_score
781
+ results.append((skill, score))
782
+
783
+ return sorted(results, key=lambda x: -x[1])
784
+
785
+ def recommend(self, context: str, available_tools: Set[str]) -> List[Tuple[Skill, str]]:
786
+ """Recommend skills based on context and available capabilities."""
787
+ # Search for relevant skills
788
+ candidates = self.search(context)[:10]
789
+
790
+ # Filter by satisfiable dependencies
791
+ available_skills = set(self._skills.keys())
792
+ viable = []
793
+ for skill, score in candidates:
794
+ can_run, missing = skill.check_dependencies(available_tools, available_skills)
795
+ if can_run:
796
+ viable.append((skill, f"Relevance: {score:.1f}"))
797
+ elif all(d.optional for d in skill.metadata.dependencies if not d.is_satisfied(available_tools, available_skills)):
798
+ viable.append((skill, f"Partial (missing optional: {missing})"))
799
+
800
+ return viable[:5]
801
+
802
+ def get_dependency_graph(self, skill_name: str) -> Dict[str, Any]:
803
+ """Build dependency tree for a skill."""
804
+ skill = self._skills.get(skill_name)
805
+ if not skill:
806
+ return {}
807
+
808
+ def build_tree(name: str, visited: Set[str]) -> Dict[str, Any]:
809
+ if name in visited:
810
+ return {"name": name, "circular": True}
811
+ visited.add(name)
812
+
813
+ s = self._skills.get(name)
814
+ if not s:
815
+ return {"name": name, "missing": True}
816
+
817
+ deps = []
818
+ for dep in s.metadata.dependencies:
819
+ if dep.kind == "skill":
820
+ deps.append(build_tree(dep.name, visited.copy()))
821
+
822
+ return {
823
+ "name": name,
824
+ "version": s.metadata.version,
825
+ "dependencies": deps,
826
+ }
827
+
828
+ return build_tree(skill_name, set())
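+
+
+ # Illustrative sketch: ranking registered skills against a free-text query.
+ def _example_find_research_skills() -> List[Tuple[Skill, float]]:
+     registry = get_skill_registry()
+     # search() returns (skill, relevance_score) pairs, best match first.
+     return registry.search("research synthesis")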
829
+
830
+
831
+ class SkillExecutor:
832
+ """
833
+ Executes skills with proper context, logging, and error handling.
834
+
835
+ Provides sandboxed execution environment, dependency injection,
836
+ and comprehensive execution tracing.
837
+ """
838
+
839
+ def __init__(
840
+ self,
841
+ agent: Agent,
842
+ registry: SkillRegistry,
843
+         guardian=None,
844
+ ):
845
+ self.agent = agent
846
+ self.registry = registry
847
+ self.guardian = guardian or get_guardian()
848
+ self._execution_log: List[SkillExecutionRecord] = []
849
+ self._max_log_size = 1000
850
+
851
+ # Execution context stack for nested skill calls
852
+ self._context_stack: List[Dict[str, Any]] = []
853
+
854
+ async def execute(
855
+ self,
856
+ skill_name: str,
857
+ inputs: Dict[str, Any],
858
+ execution_context: Optional[Dict[str, Any]] = None,
859
+ ) -> Dict[str, Any]:
860
+ """
861
+ Execute a skill with full logging and error handling.
862
+ """
863
+ skill = self.registry.get(skill_name)
864
+ if not skill:
865
+ return {
866
+ "success": False,
867
+ "error": f"Skill '{skill_name}' not found",
868
+ "skill_name": skill_name,
869
+ }
870
+
871
+ # Create execution record
872
+ execution_id = f"exec_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{hashlib.sha256(str(time.time()).encode()).hexdigest()[:8]}"
873
+ record = SkillExecutionRecord(
874
+ execution_id=execution_id,
875
+ skill_name=skill_name,
876
+ skill_version=skill.metadata.version,
877
+ started_at=datetime.now(),
878
+ input_params=inputs,
879
+ )
880
+
881
+ # Validate inputs
882
+ valid, errors = skill.validate_inputs(inputs)
883
+ if not valid:
884
+ record.success = False
885
+ record.error_message = f"Input validation failed: {'; '.join(errors)}"
886
+ record.completed_at = datetime.now()
887
+ self._log_execution(record)
888
+ return {
889
+ "success": False,
890
+ "error": record.error_message,
891
+ "validation_errors": errors,
892
+ "execution_id": execution_id,
893
+ }
894
+
895
+ # Check dependencies
896
+ available_tools = set(self.agent.tools.keys()) if hasattr(self.agent, 'tools') else set()
897
+ available_skills = set(self.registry._skills.keys())
898
+
899
+ deps_ok, missing = skill.check_dependencies(available_tools, available_skills)
900
+ if not deps_ok:
901
+ return {
902
+ "success": False,
903
+ "error": f"Missing dependencies: {', '.join(missing)}",
904
+ "execution_id": execution_id,
905
+ }
906
+
907
+ # Guardian pre-authorization for complex skills
908
+ if skill.metadata.complexity in (SkillComplexity.COMPLEX, SkillComplexity.SYSTEM):
909
+ allowed, reason, guard_event = self.guardian.check_tool_execution(
910
+ f"skill:{skill_name}", inputs, "skill_executor", execution_context or {}
911
+ )
912
+ if not allowed:
913
+ record.guardian_events.append(f"BLOCKED: {reason}")
914
+ record.success = False
915
+ record.error_message = f"Guardian blocked: {reason}"
916
+ record.completed_at = datetime.now()
917
+ self._log_execution(record)
918
+ return {
919
+ "success": False,
920
+ "error": record.error_message,
921
+ "guardian_action": "block",
922
+ "execution_id": execution_id,
923
+ }
924
+
925
+ # Push context
926
+ self._context_stack.append({
927
+ "skill_name": skill_name,
928
+ "execution_id": execution_id,
929
+ "inputs": inputs,
930
+ })
931
+
932
+ # Execute based on implementation type
933
+ try:
934
+ if skill.implementation_type == "code":
935
+ result = await self._execute_code_skill(skill, inputs, record)
936
+ elif skill.implementation_type == "composite":
937
+ result = await self._execute_composite_skill(skill, inputs, record)
938
+ elif skill.implementation_type == "llm":
939
+ result = await self._execute_llm_skill(skill, inputs, record)
940
+ elif skill.implementation_type == "template":
941
+ result = await self._execute_template_skill(skill, inputs, record)
942
+ else:
943
+ raise ValueError(f"Unknown implementation type: {skill.implementation_type}")
944
+
945
+ # Update record with success
946
+ record.success = True
947
+ record.output_data = result
948
+ record.completed_at = datetime.now()
949
+ record.duration_seconds = (record.completed_at - record.started_at).total_seconds()
950
+
951
+ # Update skill statistics
952
+ skill.metadata.execution_count += 1
953
+ # Simple running average of success rate
954
+ skill.metadata.success_rate = (
955
+ (skill.metadata.success_rate * (skill.metadata.execution_count - 1) + 1)
956
+ / skill.metadata.execution_count
957
+ )
958
+ skill.metadata.last_executed = datetime.now()
959
+
960
+ return {
961
+ "success": True,
962
+ "result": result,
963
+ "execution_id": execution_id,
964
+ "duration_seconds": record.duration_seconds,
965
+ }
966
+
967
+ except Exception as e:
968
+ record.success = False
969
+ record.error_message = str(e)
970
+ record.completed_at = datetime.now()
971
+ record.duration_seconds = (record.completed_at - record.started_at).total_seconds()
972
+
973
+ # Update skill statistics
974
+ skill.metadata.execution_count += 1
975
+ skill.metadata.success_rate = (
976
+ skill.metadata.success_rate * (skill.metadata.execution_count - 1)
977
+ ) / skill.metadata.execution_count
978
+
979
+ logger.exception(f"Skill execution failed: {skill_name}")
980
+ return {
981
+ "success": False,
982
+ "error": str(e),
983
+ "execution_id": execution_id,
984
+ "duration_seconds": record.duration_seconds,
985
+ }
986
+
987
+ finally:
988
+ # Pop context
989
+ self._context_stack.pop()
990
+ self._log_execution(record)
991
+
992
+ async def _execute_code_skill(self, skill: Skill, inputs: Dict[str, Any], record: SkillExecutionRecord) -> Any:
993
+ """Execute a Python code-based skill."""
994
+ # For security, code skills would run in restricted environment
995
+ # This is a simplified version - production would use sandboxing
996
+ func = skill.implementation
997
+ if callable(func):
998
+ # Inject agent and tools as needed
999
+ sig = inspect.signature(func)
1000
+ kwargs = {}
1001
+ if 'agent' in sig.parameters:
1002
+ kwargs['agent'] = self.agent
1003
+ if 'tools' in sig.parameters:
1004
+ kwargs['tools'] = self.agent.tools if hasattr(self.agent, 'tools') else {}
1005
+ if 'call_skill' in sig.parameters:
1006
+ kwargs['call_skill'] = self._make_skill_caller(record)
1007
+
1008
+ result = func(**inputs, **kwargs)
1009
+ if asyncio.iscoroutine(result):
1010
+ result = await result
1011
+ return result
1012
+
1013
+ raise ValueError("Code skill implementation is not callable")
1014
+
1015
+ async def _execute_composite_skill(self, skill: Skill, inputs: Dict[str, Any], record: SkillExecutionRecord) -> Any:
1016
+ """Execute a composite skill by orchestrating other skills/tools."""
1017
+ plan = skill.implementation.get("execution_plan", [])
1018
+ results = {}
1019
+
1020
+ for step in plan:
1021
+ step_name = step["step"]
1022
+ description = step["description"]
1023
+ record.steps_executed.append(f"{step_name}: {description}")
1024
+
1025
+ # Use agent's planning to execute step
1026
+ step_result = await self.agent.chat(
1027
+ user_id=f"skill:{skill.name}",
1028
+ message=f"Execute step '{step_name}': {description}. "
1029
+ f"Context: inputs={inputs}, previous_results={results}",
1030
+ context={"skill_execution": True, "step": step_name},
1031
+ )
1032
+
1033
+ if not step_result.get("success"):
1034
+ raise RuntimeError(f"Step '{step_name}' failed: {step_result.get('error')}")
1035
+
1036
+ results[step_name] = step_result.get("response")
1037
+ record.tools_used.extend(step_result.get("tools_used", []))
1038
+
1039
+ # Compile final output based on skill's output schema
1040
+ return {
1041
+ "steps_completed": list(results.keys()),
1042
+ "final_result": results.get(plan[-1]["step"]) if plan else None,
1043
+ "all_results": results,
1044
+ }
1045
+
1046
+ async def _execute_llm_skill(self, skill: Skill, inputs: Dict[str, Any], record: SkillExecutionRecord) -> Any:
1047
+ """Execute an LLM-guided skill."""
1048
+ # Build comprehensive prompt from metadata
1049
+ system_prompt = self._build_skill_system_prompt(skill)
1050
+
1051
+ # Construct user prompt from inputs
1052
+ user_prompt = f"Execute skill '{skill.name}' with parameters:\n"
1053
+ for param in skill.metadata.parameters:
1054
+ value = inputs.get(param.name)
1055
+ if value is not None:
1056
+ user_prompt += f"- {param.name}: {json.dumps(value)}\n"
1057
+
1058
+ # Execute through agent
1059
+ result = await self.agent.chat(
1060
+ user_id=f"skill:{skill.name}",
1061
+ message=user_prompt,
1062
+ context={
1063
+ "skill_execution": True,
1064
+ "system_prompt_override": system_prompt,
1065
+ },
1066
+ )
1067
+
1068
+ if not result.get("success"):
1069
+ raise RuntimeError(f"LLM skill execution failed: {result.get('error')}")
1070
+
1071
+ # Parse and validate output against schema
1072
+ output_text = result.get("response", "")
1073
+ try:
1074
+ # Try to extract JSON
1075
+ parsed = self._extract_json(output_text)
1076
+ return parsed
1077
+         except ValueError:
1078
+ # Return as structured text
1079
+ return {"raw_output": output_text}
1080
+
1081
+ async def _execute_template_skill(self, skill: Skill, inputs: Dict[str, Any], record: SkillExecutionRecord) -> Any:
1082
+ """Execute a template-based skill."""
1083
+ # Would use Jinja2 or similar
1084
+ template = skill.implementation
1085
+ # Simple string substitution for now
1086
+ result = template
1087
+ for key, value in inputs.items():
1088
+ result = result.replace(f"{{{{ {key} }}}}", str(value))
1089
+ result = result.replace(f"{{{{{key}}}}}", str(value))
1090
+ return {"rendered": result}
1091
+
1092
+ def _build_skill_system_prompt(self, skill: Skill) -> str:
1093
+ """Build system prompt for LLM skill execution."""
1094
+ lines = [
1095
+ f"You are executing the skill: {skill.name}",
1096
+ "",
1097
+ f"Description: {skill.metadata.description}",
1098
+ "",
1099
+ "Your task is to execute this skill correctly, following these steps:",
1100
+ ]
1101
+
1102
+ if skill.implementation_type == "composite":
1103
+ plan = skill.implementation.get("execution_plan", [])
1104
+ for i, step in enumerate(plan, 1):
1105
+ lines.append(f"{i}. {step['step']}: {step['description']}")
1106
+
1107
+ lines.extend([
1108
+ "",
1109
+ "Output Requirements:",
1110
+ ])
1111
+ for output in skill.metadata.outputs:
1112
+ lines.append(f"- {output.name} ({output.type}): {output.description}")
1113
+
1114
+ if skill.metadata.examples:
1115
+ lines.extend(["", "Example:", json.dumps(skill.metadata.examples[0].expected_output, indent=2)])
1116
+
1117
+ return '\n'.join(lines)
1118
+
1119
+ def _extract_json(self, text: str) -> Any:
1120
+ """Extract JSON from text that may contain markdown or other content."""
1121
+ # Try direct parse
1122
+ try:
1123
+ return json.loads(text)
1124
+ except json.JSONDecodeError:
1125
+ pass
1126
+
1127
+ # Try code blocks
1128
+ patterns = [
1129
+ r'```json\s*(.*?)\s*```',
1130
+ r'```\s*(.*?)\s*```',
1131
+ r'\{.*\}',
1132
+ r'\[.*\]',
1133
+ ]
1134
+ for pattern in patterns:
1135
+ matches = re.findall(pattern, text, re.DOTALL)
1136
+ for match in matches:
1137
+ try:
1138
+ return json.loads(match)
1139
+                 except json.JSONDecodeError:
1140
+ continue
1141
+
1142
+ raise ValueError("No valid JSON found in text")
1143
+
1144
+ def _make_skill_caller(self, parent_record: SkillExecutionRecord) -> Callable:
1145
+ """Create a function for skills to call other skills."""
1146
+ async def call_skill(skill_name: str, inputs: Dict[str, Any]) -> Dict[str, Any]:
1147
+ result = await self.execute(skill_name, inputs, {
1148
+ "parent_execution": parent_record.execution_id,
1149
+ })
1150
+ parent_record.skills_called.append(skill_name)
1151
+ return result
1152
+
1153
+ return call_skill
1154
+
1155
+ def _log_execution(self, record: SkillExecutionRecord) -> None:
1156
+ """Record execution in log."""
1157
+ self._execution_log.append(record)
1158
+ if len(self._execution_log) > self._max_log_size:
1159
+ self._execution_log.pop(0)
1160
+
1161
+ def get_execution_history(
1162
+ self,
1163
+ skill_name: Optional[str] = None,
1164
+ since: Optional[datetime] = None,
1165
+ success_only: bool = False,
1166
+ ) -> List[SkillExecutionRecord]:
1167
+ """Query execution history."""
1168
+ results = self._execution_log
1169
+
1170
+ if skill_name:
1171
+ results = [r for r in results if r.skill_name == skill_name]
1172
+
1173
+ if since:
1174
+ results = [r for r in results if r.started_at >= since]
1175
+
1176
+ if success_only:
1177
+ results = [r for r in results if r.success]
1178
+
1179
+ return results
1180
+
1181
+ def analyze_skill_performance(self, skill_name: str) -> Dict[str, Any]:
1182
+ """Analyze execution statistics for a skill."""
1183
+ records = [r for r in self._execution_log if r.skill_name == skill_name]
1184
+
1185
+ if not records:
1186
+ return {"error": "No execution records found"}
1187
+
1188
+ total = len(records)
1189
+ successful = sum(1 for r in records if r.success)
1190
+ durations = [r.duration_seconds for r in records if r.duration_seconds > 0]
1191
+
1192
+ return {
1193
+ "total_executions": total,
1194
+ "success_count": successful,
1195
+ "failure_count": total - successful,
1196
+ "success_rate": successful / total,
1197
+ "avg_duration": sum(durations) / len(durations) if durations else 0,
1198
+ "max_duration": max(durations) if durations else 0,
1199
+ "min_duration": min(durations) if durations else 0,
1200
+ "recent_errors": [r.error_message for r in records[-10:] if not r.success],
1201
+ }
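+
+
+ # Illustrative sketch: auditing a skill's recent failures through the execution log.
+ def _example_recent_failures(executor: SkillExecutor, skill_name: str) -> List[str]:
+     records = executor.get_execution_history(skill_name=skill_name)
+     # Each failed record keeps its error message for later review.
+     return [r.error_message or "unknown error" for r in records if not r.success]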
1202
+
1203
+
1204
+ class SkillLearner:
1205
+ """
1206
+ Creates new skills through learning from examples, descriptions, or demonstrations.
1207
+
1208
+ This is the engine of LollmsBot's growth - turning experience into reusable capability.
1209
+ """
1210
+
1211
+ def __init__(
1212
+ self,
1213
+ registry: SkillRegistry,
1214
+ executor: SkillExecutor,
1215
+ ):
1216
+ self.registry = registry
1217
+ self.executor = executor
1218
+
1219
+ async def learn_from_description(
1220
+ self,
1221
+ name: str,
1222
+ description: str,
1223
+ example_inputs: List[Dict[str, Any]],
1224
+ expected_outputs: List[Dict[str, Any]],
1225
+ complexity_hint: Optional[SkillComplexity] = None,
1226
+ ) -> Skill:
1227
+ """
1228
+ Create a skill from natural language description and examples.
1229
+ """
1230
+ # Analyze to extract parameters
1231
+ parameters = self._infer_parameters(example_inputs)
1232
+
1233
+ # Determine complexity
1234
+ complexity = complexity_hint or self._estimate_complexity(description, parameters)
1235
+
1236
+ # Build metadata
1237
+ metadata = SkillMetadata(
1238
+ name=name,
1239
+ description=description,
1240
+ complexity=complexity,
1241
+ parameters=parameters,
1242
+ outputs=self._infer_outputs(expected_outputs),
1243
+ learning_method="description",
1244
+ examples=[
1245
+ SkillExample(
1246
+ description=f"Example {i+1}",
1247
+ input_params=inp,
1248
+ expected_output=out,
1249
+ )
1250
+ for i, (inp, out) in enumerate(zip(example_inputs, expected_outputs))
1251
+ ],
1252
+ )
1253
+
1254
+ # Generate implementation
1255
+ if complexity in (SkillComplexity.TRIVIAL, SkillComplexity.SIMPLE):
1256
+ implementation = await self._generate_code_implementation(metadata, example_inputs, expected_outputs)
1257
+ impl_type = "code"
1258
+ else:
1259
+ implementation = await self._generate_llm_implementation(metadata)
1260
+ impl_type = "llm"
1261
+
1262
+ skill = Skill(metadata, implementation, impl_type)
1263
+
1264
+         # Register first so the executor can resolve the skill by name during validation
+         self.registry.register(skill)
+
+         # Validate with examples
1265
+ validation_results = []
1266
+ for inp, expected in zip(example_inputs, expected_outputs):
1267
+ result = await self.executor.execute(name, inp)
1268
+ validation_results.append({
1269
+ "input": inp,
1270
+ "expected": expected,
1271
+ "actual": result,
1272
+ "match": self._outputs_match(expected, result.get("result", {})),
1273
+ })
1274
+
1275
+ # Calculate confidence from validation
1276
+ matches = sum(1 for v in validation_results if v["match"])
1277
+ skill.metadata.confidence_score = matches / len(validation_results) if validation_results else 0.5
1278
+
1279
+         # The skill was registered above so the executor could resolve it during validation.
+         # Low-confidence skills remain available but carry a reduced confidence_score;
+         # the caller can refine or discard them based on these results.
+         return skill
1286
+
1287
+ async def learn_from_skill_composition(
1288
+ self,
1289
+ name: str,
1290
+ component_skills: List[str],
1291
+ data_flow: Dict[str, Any], # How outputs feed into inputs
1292
+ description: str,
1293
+ ) -> Skill:
1294
+ """
1295
+ Create a new skill by composing existing skills.
1296
+ """
1297
+ # Verify all components exist
1298
+ missing = [s for s in component_skills if s not in self.registry._skills]
1299
+ if missing:
1300
+ raise ValueError(f"Unknown component skills: {missing}")
1301
+
1302
+ # Build composite implementation
1303
+ implementation = {
1304
+ "component_skills": component_skills,
1305
+ "data_flow": data_flow,
1306
+ "execution_plan": [
1307
+ {"step": f"call_{skill}", "description": f"Execute {skill} with mapped inputs"}
1308
+ for skill in component_skills
1309
+ ],
1310
+ }
1311
+
1312
+ # Infer parameters from first skill
1313
+ first_skill = self.registry.get(component_skills[0])
1314
+ parameters = first_skill.metadata.parameters if first_skill else []
1315
+
1316
+ # Infer outputs from last skill
1317
+ last_skill = self.registry.get(component_skills[-1])
1318
+ outputs = last_skill.metadata.outputs if last_skill else []
1319
+
1320
+ metadata = SkillMetadata(
1321
+ name=name,
1322
+ description=description,
1323
+ complexity=SkillComplexity.COMPLEX,
1324
+ parameters=parameters,
1325
+ outputs=outputs,
1326
+ dependencies=[
1327
+ SkillDependency("skill", s, reason=f"Composed component: {s}")
1328
+ for s in component_skills
1329
+ ],
1330
+ learning_method="composition",
1331
+ parent_skill=component_skills[0] if len(component_skills) == 1 else None,
1332
+ )
1333
+
1334
+ skill = Skill(metadata, implementation, "composite")
1335
+ self.registry.register(skill)
1336
+ return skill
1337
+
1338
+ def _infer_parameters(self, examples: List[Dict[str, Any]]) -> List[SkillParameter]:
1339
+ """Infer parameter schema from examples."""
1340
+ if not examples:
1341
+ return []
1342
+
1343
+ # Find all keys across examples
1344
+ all_keys = set()
1345
+ for ex in examples:
1346
+ all_keys.update(ex.keys())
1347
+
1348
+ parameters = []
1349
+ for key in sorted(all_keys):
1350
+ # Determine type from values
1351
+ values = [ex.get(key) for ex in examples if key in ex]
1352
+ types_found = set(type(v).__name__ for v in values if v is not None)
1353
+
1354
+ type_map = {
1355
+ "str": "string",
1356
+ "int": "number",
1357
+ "float": "number",
1358
+ "bool": "boolean",
1359
+ "list": "array",
1360
+ "dict": "object",
1361
+ }
1362
+
1363
+ param_type = "string" # default
1364
+ for t in types_found:
1365
+ if t in type_map:
1366
+ param_type = type_map[t]
1367
+ break
1368
+
1369
+ # Check if always present
1370
+ required = all(key in ex for ex in examples)
1371
+
1372
+ # Find examples
1373
+ example_values = [v for v in values if v is not None][:3]
1374
+
1375
+ parameters.append(SkillParameter(
1376
+ name=key,
1377
+ type=param_type,
1378
+ description=f"Parameter: {key}",
1379
+ required=required,
1380
+ examples=example_values,
1381
+ ))
1382
+
1383
+ return parameters
1384
+
1385
+ def _infer_outputs(self, examples: List[Dict[str, Any]]) -> List[SkillOutput]:
1386
+ """Infer output schema from examples."""
1387
+ if not examples:
1388
+ return []
1389
+
1390
+ # Simplified: assume flat structure
1391
+ all_keys = set()
1392
+ for ex in examples:
1393
+ all_keys.update(ex.keys())
1394
+
1395
+ return [
1396
+ SkillOutput(name=k, type="object", description=f"Output: {k}")
1397
+ for k in sorted(all_keys)
1398
+ ]
1399
+
1400
+ def _estimate_complexity(self, description: str, parameters: List[SkillParameter]) -> SkillComplexity:
1401
+ """Estimate complexity from description and parameter count."""
1402
+ desc_lower = description.lower()
1403
+
1404
+ # Check for complexity indicators
1405
+ if any(w in desc_lower for w in ["simple", "trivial", "basic", "just", "only"]):
1406
+ return SkillComplexity.SIMPLE
1407
+
1408
+ if any(w in desc_lower for w in ["orchestrate", "workflow", "multi-step", "complex"]):
1409
+ return SkillComplexity.COMPLEX
1410
+
1411
+ if len(parameters) > 5:
1412
+ return SkillComplexity.MODERATE
1413
+
1414
+ return SkillComplexity.SIMPLE
1415
+
1416
+ async def _generate_code_implementation(
1417
+ self,
1418
+ metadata: SkillMetadata,
1419
+ examples: List[Dict[str, Any]],
1420
+ expected: List[Dict[str, Any]],
1421
+ ) -> str:
1422
+ """Generate Python code for simple skills."""
1423
+ # In production, this would use LLM code generation
1424
+ # For now, return a template
1425
+ param_list = ", ".join(p.name for p in metadata.parameters)
1426
+
1427
+ code = f'''
1428
+ async def {metadata.name}({param_list}, agent=None, tools=None, call_skill=None):
1429
+ """
1430
+ {metadata.description}
1431
+
1432
+ Generated from examples with confidence: {metadata.confidence_score}
1433
+ """
1434
+ # TODO: Implement based on examples
1435
+ # Examples provided: {len(examples)}
1436
+
1437
+ result = {{}}
1438
+
1439
+ # Use available tools
1440
+ if "filesystem" in tools:
1441
+ fs = tools["filesystem"]
1442
+ # Implementation here
1443
+
1444
+ return result
1445
+ '''
1446
+ return code
1447
+
1448
+ async def _generate_llm_implementation(self, metadata: SkillMetadata) -> Dict[str, Any]:
1449
+ """Generate LLM-guided implementation for complex skills."""
1450
+ return {
1451
+ "system_prompt": f"You are executing: {metadata.description}",
1452
+ "execution_guidance": "Break into steps, use tools as needed, validate outputs",
1453
+ }
1454
+
1455
+ def _outputs_match(self, expected: Dict[str, Any], actual: Dict[str, Any]) -> bool:
1456
+ """Check if actual output matches expected structure."""
1457
+ # Simplified: check key overlap
1458
+ expected_keys = set(expected.keys())
1459
+ actual_keys = set(actual.keys())
1460
+
1461
+ # Allow extra keys in actual, but require all expected keys
1462
+ return expected_keys <= actual_keys
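+
+
+ # Illustrative sketch: teaching a new skill from a description plus one example pair
+ # (hypothetical "uppercase_text" skill; the inputs and outputs are made up for the example).
+ async def _example_learn_uppercase_skill(registry: SkillRegistry, executor: SkillExecutor) -> Skill:
+     learner = SkillLearner(registry, executor)
+     return await learner.learn_from_description(
+         name="uppercase_text",
+         description="Convert the given text to upper case",
+         example_inputs=[{"text": "hello world"}],
+         expected_outputs=[{"result": "HELLO WORLD"}],
+     )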
1463
+
1464
+
1465
+ # Global registry access
1466
+ _skill_registry: Optional[SkillRegistry] = None
1467
+ _skill_executor: Optional[SkillExecutor] = None
1468
+
1469
+ def get_skill_registry() -> SkillRegistry:
1470
+ """Get or create global skill registry."""
1471
+ global _skill_registry
1472
+ if _skill_registry is None:
1473
+ _skill_registry = SkillRegistry()
1474
+ return _skill_registry
1475
+
1476
+ def get_skill_executor(agent: Optional[Agent] = None) -> SkillExecutor:
1477
+ """Get or create global skill executor."""
1478
+ global _skill_executor
1479
+ if _skill_executor is None:
1480
+ if agent is None:
1481
+ raise ValueError("Agent required for skill executor initialization")
1482
+ _skill_executor = SkillExecutor(agent, get_skill_registry())
1483
+ return _skill_executor
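+
+
+ # Illustrative end-to-end sketch (assumes an initialized Agent instance): run a built-in skill.
+ async def _example_quick_research(agent: Agent) -> Dict[str, Any]:
+     executor = get_skill_executor(agent)
+     return await executor.execute(
+         "synthesize_research",
+         {"topic": "Python asyncio best practices", "depth": "quick"},
+     )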