cognautic_cli-1.1.1-py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between package versions.
@@ -0,0 +1,115 @@
+ """
+ Tool registry for managing and executing tools
+ """
+
+ from typing import Dict, List, Optional
+ import asyncio
+ from .base import BaseTool, ToolResult, PermissionLevel
+
+
+ class PermissionManager:
+     """Manages permissions for tool execution"""
+
+     def __init__(self, default_level: PermissionLevel = PermissionLevel.SAFE_OPERATIONS):
+         self.default_level = default_level
+         self.user_permissions = {}
+
+     def can_execute(self, tool: BaseTool, user_id: str = "default") -> bool:
+         """Check if user can execute a tool"""
+         user_level = self.user_permissions.get(user_id, self.default_level)
+
+         # Permission hierarchy
+         hierarchy = {
+             PermissionLevel.READ_ONLY: 0,
+             PermissionLevel.SAFE_OPERATIONS: 1,
+             PermissionLevel.SYSTEM_OPERATIONS: 2,
+             PermissionLevel.UNRESTRICTED: 3
+         }
+
+         return hierarchy[user_level] >= hierarchy[tool.permission_level]
+
+     def set_user_permission(self, user_id: str, level: PermissionLevel):
+         """Set permission level for a user"""
+         self.user_permissions[user_id] = level
+
+
+ class ToolRegistry:
+     """Registry for managing and executing tools"""
+
+     def __init__(self):
+         self.tools: Dict[str, BaseTool] = {}
+         self.permission_manager = PermissionManager()
+         self._register_default_tools()
+
+     def _register_default_tools(self):
+         """Register default tools"""
+         from .file_operations import FileOperationsTool
+         from .command_runner import CommandRunnerTool
+         from .web_search import WebSearchTool
+         from .code_analysis import CodeAnalysisTool
+         from .response_control import ResponseControlTool
+
+         # Register tools
+         self.register_tool(FileOperationsTool())
+         self.register_tool(CommandRunnerTool())
+         self.register_tool(WebSearchTool())
+         self.register_tool(CodeAnalysisTool())
+         self.register_tool(ResponseControlTool())
+
+     def register_tool(self, tool: BaseTool):
+         """Register a tool"""
+         self.tools[tool.name] = tool
+
+     def get_tool(self, name: str) -> Optional[BaseTool]:
+         """Get a tool by name"""
+         return self.tools.get(name)
+
+     def list_tools(self) -> List[str]:
+         """List all registered tools"""
+         return list(self.tools.keys())
+
+     def get_tool_info(self, name: str) -> Optional[Dict]:
+         """Get information about a tool"""
+         tool = self.get_tool(name)
+         return tool.get_info() if tool else None
+
+     def list_all_tools_info(self) -> List[Dict]:
+         """Get information about all tools"""
+         return [tool.get_info() for tool in self.tools.values()]
+
+     async def execute_tool(
+         self,
+         tool_name: str,
+         user_id: str = "default",
+         **kwargs
+     ) -> ToolResult:
+         """Execute a tool with given parameters"""
+
+         tool = self.get_tool(tool_name)
+         if not tool:
+             return ToolResult(
+                 success=False,
+                 error=f"Tool '{tool_name}' not found"
+             )
+
+         # Check permissions
+         if not self.permission_manager.can_execute(tool, user_id):
+             return ToolResult(
+                 success=False,
+                 error=f"Insufficient permissions to execute '{tool_name}'"
+             )
+
+         try:
+             # Execute the tool
+             result = await tool.execute(**kwargs)
+             return result
+
+         except Exception as e:
+             return ToolResult(
+                 success=False,
+                 error=f"Tool execution failed: {str(e)}"
+             )
+
+     def set_permission_level(self, user_id: str, level: PermissionLevel):
+         """Set permission level for a user"""
+         self.permission_manager.set_user_permission(user_id, level)
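
A minimal usage sketch for the registry hunk above. The import paths are guesses (the diff does not show file names), and it assumes `ToolResult` exposes `success`, `data`, and `error` attributes, as the constructor calls above suggest. The permission check is a simple numeric hierarchy: a user may run a tool only if the user's level is at least the tool's level.

```python
# Hypothetical driver script; import paths are assumptions, not shown in the diff.
import asyncio

from cognautic.tools.registry import ToolRegistry   # hypothetical path
from cognautic.tools.base import PermissionLevel    # hypothetical path

async def main():
    registry = ToolRegistry()
    print(registry.list_tools())  # default tools registered in __init__

    # Restrict a user to READ_ONLY, then attempt a SAFE_OPERATIONS tool:
    registry.set_permission_level("alice", PermissionLevel.READ_ONLY)
    result = await registry.execute_tool(
        "web_search", user_id="alice",
        operation="search_web", query="aiohttp docs",
    )
    # Blocked before execution: hierarchy[READ_ONLY] (0) < hierarchy[SAFE_OPERATIONS] (1)
    assert not result.success
    print(result.error)

asyncio.run(main())
```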
@@ -0,0 +1,48 @@
+ """
+ Response control tool for managing AI continuation
+ """
+
+ from typing import List, Dict, Any
+ from .base import BaseTool, ToolResult, PermissionLevel
+
+
+ class ResponseControlTool(BaseTool):
+     """Tool for controlling AI response continuation"""
+
+     def __init__(self):
+         super().__init__(
+             name="response_control",
+             description="Control when AI response should end (stops auto-continuation)",
+             permission_level=PermissionLevel.SAFE_OPERATIONS
+         )
+
+     def get_capabilities(self) -> List[str]:
+         return [
+             "end_response",
+             "continue_response"
+         ]
+
+     async def execute(self, operation: str, **kwargs) -> ToolResult:
+         """Execute response control operation"""
+
+         if operation == "end_response":
+             return ToolResult(
+                 success=True,
+                 data={
+                     "action": "end_response",
+                     "message": kwargs.get("message", "Response completed")
+                 }
+             )
+         elif operation == "continue_response":
+             return ToolResult(
+                 success=True,
+                 data={
+                     "action": "continue_response",
+                     "message": kwargs.get("message", "Continuing response")
+                 }
+             )
+         else:
+             return ToolResult(
+                 success=False,
+                 error=f"Unknown operation: {operation}"
+             )
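
A hedged sketch of how a caller could act on this tool's result. The tool itself only returns a marker; the loop that interprets it is illustrative and not part of the package, and the import path is again a guess.

```python
# Illustrative only: the package's actual agent loop is not shown in this diff.
import asyncio

from cognautic.tools.response_control import ResponseControlTool  # hypothetical path

async def demo():
    tool = ResponseControlTool()
    result = await tool.execute("end_response", message="All tasks done")
    if result.success and result.data["action"] == "end_response":
        # A caller would stop auto-continuation here.
        print("stop:", result.data["message"])

asyncio.run(demo())
```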
@@ -0,0 +1,336 @@
+ """
+ Web search tool for information retrieval
+ """
+
+ import aiohttp
+ import asyncio
+ from typing import List, Dict, Any, Optional
+ from urllib.parse import quote_plus, urljoin
+ from bs4 import BeautifulSoup
+ import json
+
+ from .base import BaseTool, ToolResult, PermissionLevel
+
+
+ class WebSearchTool(BaseTool):
+     """Tool for web search and content retrieval"""
+
+     def __init__(self):
+         super().__init__(
+             name="web_search",
+             description="Search the web for information and documentation",
+             permission_level=PermissionLevel.SAFE_OPERATIONS
+         )
+
+     def get_capabilities(self) -> List[str]:
+         return [
+             "search_web",
+             "fetch_url_content",
+             "parse_documentation",
+             "get_api_docs"
+         ]
+
+     async def execute(self, operation: str, **kwargs) -> ToolResult:
+         """Execute web search operation"""
+
+         operations = {
+             'search_web': self._search_web,
+             'fetch_url_content': self._fetch_url_content,
+             'parse_documentation': self._parse_documentation,
+             'get_api_docs': self._get_api_docs
+         }
+
+         if operation not in operations:
+             return ToolResult(
+                 success=False,
+                 error=f"Unknown operation: {operation}"
+             )
+
+         try:
+             result = await operations[operation](**kwargs)
+             return ToolResult(success=True, data=result)
+         except Exception as e:
+             return ToolResult(success=False, error=str(e))
+
+     async def _search_web(
+         self,
+         query: str,
+         num_results: int = 10,
+         search_engine: str = "duckduckgo"
+     ) -> List[Dict[str, Any]]:
+         """Search the web for information"""
+
+         if search_engine == "duckduckgo":
+             return await self._search_duckduckgo(query, num_results)
+         else:
+             raise ValueError(f"Unsupported search engine: {search_engine}")
+
+     async def _search_duckduckgo(self, query: str, num_results: int) -> List[Dict[str, Any]]:
+         """Search using DuckDuckGo"""
+
+         # DuckDuckGo Instant Answer API
+         url = "https://api.duckduckgo.com/"
+         params = {
+             'q': query,
+             'format': 'json',
+             'no_html': '1',
+             'skip_disambig': '1'
+         }
+
+         async with aiohttp.ClientSession() as session:
+             try:
+                 async with session.get(url, params=params) as response:
+                     if response.status == 200:
+                         data = await response.json()
+
+                         results = []
+
+                         # Add abstract if available
+                         if data.get('Abstract'):
+                             results.append({
+                                 'title': data.get('Heading', 'Abstract'),
+                                 'snippet': data.get('Abstract'),
+                                 'url': data.get('AbstractURL', ''),
+                                 'source': data.get('AbstractSource', 'DuckDuckGo')
+                             })
+
+                         # Add related topics
+                         for topic in data.get('RelatedTopics', [])[:num_results-1]:
+                             if isinstance(topic, dict) and 'Text' in topic:
+                                 results.append({
+                                     'title': topic.get('Text', '').split(' - ')[0],
+                                     'snippet': topic.get('Text', ''),
+                                     'url': topic.get('FirstURL', ''),
+                                     'source': 'DuckDuckGo'
+                                 })
+
+                         # If no results, try web scraping approach
+                         if not results:
+                             return await self._scrape_search_results(query, num_results)
+
+                         return results[:num_results]
+                     else:
+                         raise Exception(f"Search API returned status {response.status}")
+
+             except Exception as e:
+                 # Fallback to web scraping
+                 return await self._scrape_search_results(query, num_results)
+
+     async def _scrape_search_results(self, query: str, num_results: int) -> List[Dict[str, Any]]:
+         """Scrape search results as fallback"""
+
+         # Use DuckDuckGo HTML search as fallback
+         url = f"https://html.duckduckgo.com/html/?q={quote_plus(query)}"
+
+         headers = {
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
+         }
+
+         async with aiohttp.ClientSession() as session:
+             try:
+                 async with session.get(url, headers=headers) as response:
+                     if response.status == 200:
+                         html = await response.text()
+                         soup = BeautifulSoup(html, 'html.parser')
+
+                         results = []
+                         result_divs = soup.find_all('div', class_='result')
+
+                         for div in result_divs[:num_results]:
+                             title_elem = div.find('a', class_='result__a')
+                             snippet_elem = div.find('a', class_='result__snippet')
+
+                             if title_elem:
+                                 results.append({
+                                     'title': title_elem.get_text(strip=True),
+                                     'snippet': snippet_elem.get_text(strip=True) if snippet_elem else '',
+                                     'url': title_elem.get('href', ''),
+                                     'source': 'Web Search'
+                                 })
+
+                         return results
+                     else:
+                         raise Exception(f"Search scraping returned status {response.status}")
+
+             except Exception as e:
+                 # Return empty results if all methods fail
+                 return [{
+                     'title': 'Search Failed',
+                     'snippet': f'Unable to perform web search: {str(e)}',
+                     'url': '',
+                     'source': 'Error'
+                 }]
+
+     async def _fetch_url_content(
+         self,
+         url: str,
+         extract_text: bool = True,
+         max_length: int = 10000
+     ) -> Dict[str, Any]:
+         """Fetch content from a URL"""
+
+         headers = {
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
+         }
+
+         async with aiohttp.ClientSession() as session:
+             try:
+                 async with session.get(url, headers=headers, timeout=30) as response:
+                     if response.status == 200:
+                         content_type = response.headers.get('content-type', '').lower()
+
+                         if 'text/html' in content_type:
+                             html = await response.text()
+
+                             if extract_text:
+                                 soup = BeautifulSoup(html, 'html.parser')
+
+                                 # Remove script and style elements
+                                 for script in soup(["script", "style"]):
+                                     script.decompose()
+
+                                 # Get text content
+                                 text = soup.get_text()
+
+                                 # Clean up text
+                                 lines = (line.strip() for line in text.splitlines())
+                                 chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
+                                 text = ' '.join(chunk for chunk in chunks if chunk)
+
+                                 # Truncate if too long
+                                 if len(text) > max_length:
+                                     text = text[:max_length] + "..."
+
+                                 return {
+                                     'url': url,
+                                     'title': soup.title.string if soup.title else '',
+                                     'content': text,
+                                     'content_type': 'text',
+                                     'length': len(text)
+                                 }
+                             else:
+                                 return {
+                                     'url': url,
+                                     'content': html,
+                                     'content_type': 'html',
+                                     'length': len(html)
+                                 }
+
+                         elif 'application/json' in content_type:
+                             json_data = await response.json()
+                             return {
+                                 'url': url,
+                                 'content': json_data,
+                                 'content_type': 'json',
+                                 'length': len(str(json_data))
+                             }
+
+                         else:
+                             text = await response.text()
+                             return {
+                                 'url': url,
+                                 'content': text,
+                                 'content_type': 'text',
+                                 'length': len(text)
+                             }
+
+                     else:
+                         raise Exception(f"HTTP {response.status}: {response.reason}")
+
+             except Exception as e:
+                 raise Exception(f"Failed to fetch URL content: {str(e)}")
+
+     async def _parse_documentation(
+         self,
+         url: str,
+         doc_type: str = "auto"
+     ) -> Dict[str, Any]:
+         """Parse documentation from a URL"""
+
+         content_data = await self._fetch_url_content(url, extract_text=True)
+
+         if doc_type == "auto":
+             # Try to detect documentation type
+             url_lower = url.lower()
+             if 'github.com' in url_lower and 'readme' in url_lower:
+                 doc_type = "readme"
+             elif 'docs.' in url_lower or '/docs/' in url_lower:
+                 doc_type = "api_docs"
+             else:
+                 doc_type = "general"
+
+         # Parse based on type
+         parsed_content = {
+             'url': url,
+             'doc_type': doc_type,
+             'title': content_data.get('title', ''),
+             'content': content_data.get('content', ''),
+             'sections': []
+         }
+
+         # Try to extract sections (simplified)
+         content = content_data.get('content', '')
+         sections = []
+         current_section = {'title': 'Introduction', 'content': ''}
+
+         for line in content.split('\n'):
+             line = line.strip()
+             if line.startswith('#') or (line.isupper() and len(line) < 100):
+                 # Likely a heading
+                 if current_section['content']:
+                     sections.append(current_section)
+                 current_section = {'title': line.lstrip('#').strip(), 'content': ''}
+             else:
+                 current_section['content'] += line + '\n'
+
+         if current_section['content']:
+             sections.append(current_section)
+
+         parsed_content['sections'] = sections
+         return parsed_content
+
+     async def _get_api_docs(
+         self,
+         api_name: str,
+         version: str = "latest"
+     ) -> Dict[str, Any]:
+         """Get API documentation for popular APIs"""
+
+         # Common API documentation URLs
+         api_docs = {
+             'openai': 'https://platform.openai.com/docs/api-reference',
+             'anthropic': 'https://docs.anthropic.com/claude/reference',
+             'github': 'https://docs.github.com/en/rest',
+             'stripe': 'https://stripe.com/docs/api',
+             'twilio': 'https://www.twilio.com/docs/usage/api',
+             'aws': 'https://docs.aws.amazon.com/',
+             'google': 'https://developers.google.com/apis-explorer',
+             'microsoft': 'https://docs.microsoft.com/en-us/rest/api/'
+         }
+
+         api_name_lower = api_name.lower()
+
+         if api_name_lower in api_docs:
+             url = api_docs[api_name_lower]
+             return await self._parse_documentation(url, doc_type="api_docs")
+         else:
+             # Search for API documentation
+             search_query = f"{api_name} API documentation {version}"
+             search_results = await self._search_web(search_query, num_results=5)
+
+             if search_results:
+                 # Try to fetch the first result
+                 first_result = search_results[0]
+                 if first_result['url']:
+                     try:
+                         return await self._parse_documentation(first_result['url'], doc_type="api_docs")
+                     except Exception:
+                         pass
+
+             return {
+                 'api_name': api_name,
+                 'version': version,
+                 'found': False,
+                 'search_results': search_results,
+                 'message': f'Could not find API documentation for {api_name}'
+             }
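
An illustrative sketch exercising the web-search hunk above. The import path is a guess, live network access to the DuckDuckGo endpoints is required for real output, and note that `search_web` silently falls back to scraping `html.duckduckgo.com` when the Instant Answer API yields nothing.

```python
# Sketch only: exercises WebSearchTool's operations; paths and output are not guaranteed.
import asyncio

from cognautic.tools.web_search import WebSearchTool  # hypothetical path

async def demo():
    tool = WebSearchTool()

    # search_web returns a ToolResult whose data is a list of result dicts.
    hits = await tool.execute("search_web", query="python asyncio tutorial", num_results=3)
    if hits.success:
        for hit in hits.data:
            print(hit["title"], "-", hit["url"])

    # fetch_url_content extracts cleaned text by default, truncated to max_length.
    page = await tool.execute(
        "fetch_url_content",
        url="https://docs.python.org/3/library/asyncio.html",
        max_length=2000,
    )
    print(page.data["content_type"] if page.success else page.error)

asyncio.run(demo())
```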