skillnet-ai 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
skillnet_ai/__init__.py CHANGED
@@ -0,0 +1,23 @@
+ """
+ SkillNet AI SDK
+ ~~~~~~~~~~~~~~~
+
+ A client library for searching, downloading, creating, and evaluating AI Agent Skills.
+ """
+
+ from skillnet_ai.client import SkillNetClient
+ from skillnet_ai.creator import SkillCreator
+ from skillnet_ai.downloader import SkillDownloader
+ from skillnet_ai.evaluator import SkillEvaluator, EvaluatorConfig
+ from skillnet_ai.searcher import SkillNetSearcher
+ from skillnet_ai.analyzer import SkillRelationshipAnalyzer
+
+ __all__ = [
+     "SkillNetClient",
+     "SkillCreator",
+     "SkillDownloader",
+     "SkillEvaluator",
+     "EvaluatorConfig",
+     "SkillNetSearcher",
+     "SkillRelationshipAnalyzer",
+ ]
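
The `__init__.py` hunk re-exports seven names; `SkillRelationshipAnalyzer` is the one whose implementation appears in full below. A minimal usage sketch, assuming the package is installed; the API key and `./skills` path are placeholders, and only `SkillRelationshipAnalyzer`'s signature is confirmed by this diff:

```python
# Hypothetical values: "sk-..." and "./skills" are placeholders.
from skillnet_ai import SkillRelationshipAnalyzer

analyzer = SkillRelationshipAnalyzer(api_key="sk-...", model="gpt-4o")
edges = analyzer.analyze_local_skills("./skills", save_to_file=True)
for edge in edges:
    print(f"{edge['source']} --{edge['type']}--> {edge['target']}")
```
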
skillnet_ai/analyzer.py ADDED
@@ -0,0 +1,227 @@
+ import os
+ import json
+ import logging
+ import re
+ from typing import List, Dict, Any, Optional
+
+ from openai import OpenAI
+ from skillnet_ai.prompts import (
+     RELATIONSHIP_ANALYSIS_SYSTEM_PROMPT,
+     RELATIONSHIP_ANALYSIS_USER_PROMPT_TEMPLATE
+ )
+
+ logger = logging.getLogger(__name__)
+
+ class SkillRelationshipAnalyzer:
+     """
+     Analyzes a directory of skills to determine relationships between them.
+
+     Relationships determined:
+     - similar_to: A and B are functionally similar and interchangeable.
+     - belong_to: A is a sub-task/part of B (B is the larger scope).
+     - compose_with: A and B are independent but often used together.
+     - depend_on: A requires B to execute (prerequisite).
+     """
+
+     def __init__(self, api_key: Optional[str] = None, base_url: Optional[str] = None, model: str = "gpt-4o"):
+         self.api_key = api_key or os.getenv("API_KEY")
+         self.base_url = base_url or os.getenv("BASE_URL") or "https://api.openai.com/v1"
+         self.model = model
+
+         if not self.api_key:
+             raise ValueError("API Key is missing. Please provide it in init or set the API_KEY environment variable.")
+
+         self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+
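
The constructor resolves credentials from explicit arguments first, then the `API_KEY` and `BASE_URL` environment variables, defaulting to the public OpenAI endpoint. A sketch of the environment-driven path (all values hypothetical):

```python
import os

# Hypothetical credentials; any OpenAI-compatible endpoint should work, since
# both values are passed straight through to the OpenAI client.
os.environ["API_KEY"] = "sk-..."
os.environ["BASE_URL"] = "https://api.openai.com/v1"

analyzer = SkillRelationshipAnalyzer()  # picks both up from the environment
```
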
+     def analyze_local_skills(self, skills_dir: str, save_to_file: bool = True) -> List[Dict[str, Any]]:
+         """
+         Main entry point: Scans a directory for skills and maps their relationships.
+
+         Args:
+             skills_dir: Path to the directory containing skill folders.
+             save_to_file: Whether to save the result as 'relationships.json' in the dir.
+
+         Returns:
+             A list of relationship dictionaries:
+             [
+                 {
+                     "source": "skill_a",
+                     "target": "skill_b",
+                     "type": "depend_on",
+                     "reason": "Skill A imports modules provided by Skill B"
+                 },
+                 ...
+             ]
+         """
+         logger.info(f"Starting relationship analysis in: {skills_dir}")
+
+         if not os.path.exists(skills_dir):
+             raise FileNotFoundError(f"Directory not found: {skills_dir}")
+
+         # 1. Load Skill Metadata
+         skills_metadata = self._load_skills_metadata(skills_dir)
+         if len(skills_metadata) < 2:
+             logger.warning("Not enough skills found to analyze relationships (need at least 2).")
+             return []
+
+         logger.info(f"Found {len(skills_metadata)} skills. Analyzing potential connections...")
+
+         # 2. Analyze with LLM
+         relationships = self._generate_relationship_graph(skills_metadata)
+
+         # 3. Save Results
+         if save_to_file and relationships:
+             output_path = os.path.join(skills_dir, "relationships.json")
+             try:
+                 with open(output_path, 'w', encoding='utf-8') as f:
+                     json.dump(relationships, f, indent=2, ensure_ascii=False)
+                 logger.info(f"Relationships saved to: {output_path}")
+             except IOError as e:
+                 logger.error(f"Failed to save relationships file: {e}")
+
+         return relationships
+
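
Since `analyze_local_skills` both returns the edge list and, by default, writes `relationships.json` into the skills directory, downstream code can rebuild the graph offline. A small consumer sketch working from the saved file (the path is hypothetical):

```python
import json
from collections import defaultdict

# Load the edge list written by analyze_local_skills (hypothetical path).
with open("skills/relationships.json", encoding="utf-8") as f:
    edges = json.load(f)

# Group outgoing edges by source skill for a quick adjacency view.
adjacency = defaultdict(list)
for edge in edges:
    adjacency[edge["source"]].append((edge["type"], edge["target"]))

for source, targets in sorted(adjacency.items()):
    print(source, "->", targets)
```
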
+     def _load_skills_metadata(self, root_dir: str) -> List[Dict[str, str]]:
+         """
+         Walks the directory to extract 'name' and 'description' from SKILL.md or README.md.
+         """
+         skills = []
+
+         # Assuming typical structure: root_dir/skill_name/SKILL.md
+         for entry in os.scandir(root_dir):
+             if entry.is_dir():
+                 skill_path = entry.path
+                 skill_name = entry.name
+                 description = "No description provided."
+
+                 # Prefer SKILL.md, falling back to README.md as the docstring states
+                 content_file = None
+                 if os.path.exists(os.path.join(skill_path, "SKILL.md")):
+                     content_file = os.path.join(skill_path, "SKILL.md")
+                 elif os.path.exists(os.path.join(skill_path, "README.md")):
+                     content_file = os.path.join(skill_path, "README.md")
+
+                 if content_file:
+                     try:
+                         with open(content_file, 'r', encoding='utf-8') as f:
+                             raw_content = f.read()
+                             # Extract description from frontmatter or first paragraph
+                             description = self._extract_description(raw_content)
+                     except Exception as e:
+                         logger.warning(f"Could not read content for {skill_name}: {e}")
+
+                 skills.append({
+                     "name": skill_name,
+                     "description": description
+                 })
+
+         return skills
+
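
Given a hypothetical layout of `skills/pdf-extractor/SKILL.md` and `skills/web-scraper/SKILL.md`, the method returns one entry per subdirectory, shaped like this (names and descriptions invented):

```python
[
    {"name": "pdf-extractor", "description": "Extracts tables and text from PDF files."},
    {"name": "web-scraper", "description": "Fetches and parses web pages."},
]
```
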
+     def _extract_description(self, content: str) -> str:
+         """Helper to parse description from markdown frontmatter or body."""
+         # 1. Try YAML Frontmatter
+         frontmatter_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
+         if frontmatter_match:
+             fm_text = frontmatter_match.group(1)
+             # Simple regex search for a 'description: ...' line
+             desc_match = re.search(r'description:\s*(.+)$', fm_text, re.MULTILINE)
+             if desc_match:
+                 return desc_match.group(1).strip().strip('"').strip("'")
+
+         # 2. Fallback: Use the first non-header text block
+         # Strip frontmatter so stray 'key: value' lines are not mistaken for prose
+         clean_text = re.sub(r'^---\n.*?\n---', '', content, flags=re.DOTALL)
+         # Remove headers (anchored to line starts so inline '#' text survives)
+         clean_text = re.sub(r'^#+\s.*$', '', clean_text, flags=re.MULTILINE)
+         # Remove code blocks
+         clean_text = re.sub(r'```.*?```', '', clean_text, flags=re.DOTALL)
+         lines = [line.strip() for line in clean_text.split('\n') if line.strip()]
+
+         if lines:
+             return lines[0]
+
+         return "No description available."
+
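
The frontmatter path can be exercised directly (reusing the `analyzer` instance from the earlier sketch); for a hypothetical SKILL.md, the quoted `description:` value wins over the body text:

```python
sample = """\
---
name: pdf-extractor
description: "Extracts tables and text from PDF files."
---

# PDF Extractor
Some longer body text.
"""

# Prints: Extracts tables and text from PDF files.
print(analyzer._extract_description(sample))
```
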
+     def _extract_json_from_tags(self, content: str, tag_name: str) -> str:
+         """Helper to extract content between XML-style tags."""
+         start_tag = f"<{tag_name}>"
+         end_tag = f"</{tag_name}>"
+
+         if start_tag in content and end_tag in content:
+             return content.split(start_tag)[1].split(end_tag)[0].strip()
+
+         # Fallback: strip markdown code fences and assume the rest is bare JSON
+         clean_content = content.replace("```json", "").replace("```", "").strip()
+
+         return clean_content
+
+     def _generate_relationship_graph(self, skills: List[Dict]) -> List[Dict]:
+         """Calls LLM to infer edges between nodes."""
+
+         skills_json = json.dumps(skills, indent=2)
+
+         messages = [
+             {"role": "system", "content": RELATIONSHIP_ANALYSIS_SYSTEM_PROMPT},
+             {"role": "user", "content": RELATIONSHIP_ANALYSIS_USER_PROMPT_TEMPLATE.format(
+                 skills_list=skills_json
+             )}
+         ]
+
+         try:
+             response = self.client.chat.completions.create(
+                 model=self.model,
+                 messages=messages,
+             )
+             content = response.choices[0].message.content
+
+             # 1. Extract JSON from tags
+             json_str = self._extract_json_from_tags(content, "Skill_Relationships")
+
+             # 2. Parse JSON
+             parsed_data = json.loads(json_str)
+
+             # 3. Extract edges
+             edges = []
+             if isinstance(parsed_data, list):
+                 edges = parsed_data
+             elif isinstance(parsed_data, dict) and "relationships" in parsed_data:
+                 edges = parsed_data["relationships"]
+
+             # 4. Validate edges structure
+             valid_edges = []
+             valid_names = {s['name'] for s in skills}
+
+             valid_types = {'similar_to', 'belong_to', 'compose_with', 'depend_on'}
+
+             for edge in edges:
+                 # Basic type check
+                 if not isinstance(edge, dict):
+                     continue
+
+                 s_name = edge.get('source')
+                 t_name = edge.get('target')
+                 r_type = edge.get('type')
+
+                 # Validate names and type
+                 if (s_name in valid_names and
+                         t_name in valid_names and
+                         r_type in valid_types and
+                         s_name != t_name):
+
+                     valid_edges.append({
+                         "source": s_name,
+                         "target": t_name,
+                         "type": r_type,
+                         "reason": edge.get("reason", "No reason provided")
+                     })
+
+             logger.info(f"Identified {len(valid_edges)} valid relationships.")
+             return valid_edges
+
+         except json.JSONDecodeError as e:
+             logger.error(f"Failed to parse JSON content: {e}")
+             logger.debug(f"Raw content was: {content}")
+             return []
+         except Exception as e:
+             logger.error(f"Failed to analyze relationships: {e}")
+             return []
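
Putting it together: a successful run leaves a `relationships.json` whose entries carry exactly the validated keys (`source`, `target`, `type`, `reason`) and one of the four documented relationship types. Every value below is invented for illustration:

```json
[
  {"source": "csv-reader", "target": "data-cleaning", "type": "belong_to",
   "reason": "Reading CSVs is one step of the broader data-cleaning skill"},
  {"source": "chart-builder", "target": "csv-reader", "type": "depend_on",
   "reason": "Chart building requires tables produced by the CSV reader"},
  {"source": "web-scraper", "target": "chart-builder", "type": "compose_with",
   "reason": "Scraped data is commonly visualized afterwards"},
  {"source": "csv-reader", "target": "excel-reader", "type": "similar_to",
   "reason": "Both load tabular files and are largely interchangeable"}
]
```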