nc1709 1.15.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86) hide show
  1. nc1709/__init__.py +13 -0
  2. nc1709/agent/__init__.py +36 -0
  3. nc1709/agent/core.py +505 -0
  4. nc1709/agent/mcp_bridge.py +245 -0
  5. nc1709/agent/permissions.py +298 -0
  6. nc1709/agent/tools/__init__.py +21 -0
  7. nc1709/agent/tools/base.py +440 -0
  8. nc1709/agent/tools/bash_tool.py +367 -0
  9. nc1709/agent/tools/file_tools.py +454 -0
  10. nc1709/agent/tools/notebook_tools.py +516 -0
  11. nc1709/agent/tools/search_tools.py +322 -0
  12. nc1709/agent/tools/task_tool.py +284 -0
  13. nc1709/agent/tools/web_tools.py +555 -0
  14. nc1709/agents/__init__.py +17 -0
  15. nc1709/agents/auto_fix.py +506 -0
  16. nc1709/agents/test_generator.py +507 -0
  17. nc1709/checkpoints.py +372 -0
  18. nc1709/cli.py +3380 -0
  19. nc1709/cli_ui.py +1080 -0
  20. nc1709/cognitive/__init__.py +149 -0
  21. nc1709/cognitive/anticipation.py +594 -0
  22. nc1709/cognitive/context_engine.py +1046 -0
  23. nc1709/cognitive/council.py +824 -0
  24. nc1709/cognitive/learning.py +761 -0
  25. nc1709/cognitive/router.py +583 -0
  26. nc1709/cognitive/system.py +519 -0
  27. nc1709/config.py +155 -0
  28. nc1709/custom_commands.py +300 -0
  29. nc1709/executor.py +333 -0
  30. nc1709/file_controller.py +354 -0
  31. nc1709/git_integration.py +308 -0
  32. nc1709/github_integration.py +477 -0
  33. nc1709/image_input.py +446 -0
  34. nc1709/linting.py +519 -0
  35. nc1709/llm_adapter.py +667 -0
  36. nc1709/logger.py +192 -0
  37. nc1709/mcp/__init__.py +18 -0
  38. nc1709/mcp/client.py +370 -0
  39. nc1709/mcp/manager.py +407 -0
  40. nc1709/mcp/protocol.py +210 -0
  41. nc1709/mcp/server.py +473 -0
  42. nc1709/memory/__init__.py +20 -0
  43. nc1709/memory/embeddings.py +325 -0
  44. nc1709/memory/indexer.py +474 -0
  45. nc1709/memory/sessions.py +432 -0
  46. nc1709/memory/vector_store.py +451 -0
  47. nc1709/models/__init__.py +86 -0
  48. nc1709/models/detector.py +377 -0
  49. nc1709/models/formats.py +315 -0
  50. nc1709/models/manager.py +438 -0
  51. nc1709/models/registry.py +497 -0
  52. nc1709/performance/__init__.py +343 -0
  53. nc1709/performance/cache.py +705 -0
  54. nc1709/performance/pipeline.py +611 -0
  55. nc1709/performance/tiering.py +543 -0
  56. nc1709/plan_mode.py +362 -0
  57. nc1709/plugins/__init__.py +17 -0
  58. nc1709/plugins/agents/__init__.py +18 -0
  59. nc1709/plugins/agents/django_agent.py +912 -0
  60. nc1709/plugins/agents/docker_agent.py +623 -0
  61. nc1709/plugins/agents/fastapi_agent.py +887 -0
  62. nc1709/plugins/agents/git_agent.py +731 -0
  63. nc1709/plugins/agents/nextjs_agent.py +867 -0
  64. nc1709/plugins/base.py +359 -0
  65. nc1709/plugins/manager.py +411 -0
  66. nc1709/plugins/registry.py +337 -0
  67. nc1709/progress.py +443 -0
  68. nc1709/prompts/__init__.py +22 -0
  69. nc1709/prompts/agent_system.py +180 -0
  70. nc1709/prompts/task_prompts.py +340 -0
  71. nc1709/prompts/unified_prompt.py +133 -0
  72. nc1709/reasoning_engine.py +541 -0
  73. nc1709/remote_client.py +266 -0
  74. nc1709/shell_completions.py +349 -0
  75. nc1709/slash_commands.py +649 -0
  76. nc1709/task_classifier.py +408 -0
  77. nc1709/version_check.py +177 -0
  78. nc1709/web/__init__.py +8 -0
  79. nc1709/web/server.py +950 -0
  80. nc1709/web/templates/index.html +1127 -0
  81. nc1709-1.15.4.dist-info/METADATA +858 -0
  82. nc1709-1.15.4.dist-info/RECORD +86 -0
  83. nc1709-1.15.4.dist-info/WHEEL +5 -0
  84. nc1709-1.15.4.dist-info/entry_points.txt +2 -0
  85. nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
  86. nc1709-1.15.4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,555 @@
1
+ """
2
+ Web Tools
3
+
4
+ Tools for web operations:
5
+ - WebFetch: Fetch and process web page content
6
+ - WebSearch: Search the web using DuckDuckGo or Brave Search
7
+ """
8
+
9
+ import json
10
+ import re
11
+ import urllib.request
12
+ import urllib.error
13
+ import urllib.parse
14
+ from typing import Optional, List, Dict, Any
15
+ from html.parser import HTMLParser
16
+
17
+ from .base import Tool, ToolResult, ToolParameter, ToolPermission
18
+
19
+
20
class HTMLToTextParser(HTMLParser):
    """Convert an HTML document into plain visible text.

    Content inside <script> and <style> elements is dropped, and the
    start of each block-level tag is turned into a newline so the
    extracted text keeps a rough paragraph structure.
    """

    # Tags whose opening marks a block boundary in the extracted text.
    _BLOCK_TAGS = ("p", "div", "br", "h1", "h2", "h3", "h4", "h5", "h6", "li")

    def __init__(self):
        super().__init__()
        self.text_parts = []    # accumulated text fragments, joined in get_text()
        self.in_script = False  # True while inside a <script> element
        self.in_style = False   # True while inside a <style> element

    def handle_starttag(self, tag, attrs):
        if tag == "script":
            self.in_script = True
        elif tag == "style":
            self.in_style = True
        elif tag in self._BLOCK_TAGS:
            self.text_parts.append("\n")

    def handle_endtag(self, tag):
        if tag == "script":
            self.in_script = False
        elif tag == "style":
            self.in_style = False

    def handle_data(self, data):
        # Keep only text that lies outside script/style elements.
        if not (self.in_script or self.in_style):
            self.text_parts.append(data)

    def get_text(self) -> str:
        """Return the collected text with whitespace normalized."""
        text = "".join(self.text_parts)
        text = re.sub(r"\n{3,}", "\n\n", text)  # collapse runs of blank lines
        text = re.sub(r"[ \t]+", " ", text)     # collapse spaces and tabs
        return text.strip()
53
+
54
+
55
class WebFetchTool(Tool):
    """Fetch a web page over HTTP(S) and return its content as plain text."""

    name = "WebFetch"
    description = (
        "Fetch content from a web URL and convert HTML to text. "
        "Use this to read documentation, articles, or any web page."
    )
    category = "web"
    permission = ToolPermission.ASK  # Ask before making web requests

    parameters = [
        ToolParameter(
            name="url",
            description="The URL to fetch",
            type="string",
            required=True,
        ),
        ToolParameter(
            name="prompt",
            description="Optional prompt to focus on specific information from the page",
            type="string",
            required=False,
        ),
    ]

    def execute(self, url: str, prompt: Optional[str] = None) -> ToolResult:
        """Fetch *url* and return its text content.

        Args:
            url: Target URL; "https://" is prepended when no scheme is given.
            prompt: Accepted for interface compatibility; not used by this
                implementation.

        Returns:
            ToolResult whose output contains the page text (HTML is converted
            to plain text and truncated to 50,000 characters), or an error
            description on failure.
        """
        # Default to HTTPS when the caller omitted the scheme.
        if not url.startswith(("http://", "https://")):
            url = "https://" + url

        try:
            # Identify ourselves and prefer text-like content.
            headers = {
                "User-Agent": "Mozilla/5.0 (compatible; NC1709/1.0; +https://github.com/nc1709)",
                "Accept": "text/html,application/xhtml+xml,text/plain",
            }
            req = urllib.request.Request(url, headers=headers)

            with urllib.request.urlopen(req, timeout=30) as response:
                content_type = response.headers.get("Content-Type", "")
                content = response.read()

            # Determine the encoding from the Content-Type header.
            encoding = "utf-8"
            if "charset=" in content_type:
                # Strip any surrounding quotes (e.g. charset="utf-8").
                encoding = (
                    content_type.split("charset=")[-1].split(";")[0].strip().strip("\"'")
                )

            try:
                text = content.decode(encoding)
            except (UnicodeDecodeError, LookupError):
                # Wrong or unknown charset: fall back to lenient UTF-8.
                text = content.decode("utf-8", errors="replace")

            # Convert HTML to plain text.
            if "html" in content_type.lower():
                parser = HTMLToTextParser()
                parser.feed(text)
                text = parser.get_text()

            # Truncate very large pages to keep the output manageable.
            max_length = 50000
            if len(text) > max_length:
                text = text[:max_length] + "\n\n... (content truncated)"

            output = f"Content from {url}:\n\n{text}"

            return ToolResult(
                success=True,
                output=output,
                target=url,
                data={
                    "url": url,
                    "content_type": content_type,
                    "length": len(text),
                },
            )

        except urllib.error.HTTPError as e:
            return ToolResult(
                success=False,
                output="",
                error=f"HTTP {e.code}: {e.reason}",
                target=url,
            )
        except urllib.error.URLError as e:
            return ToolResult(
                success=False,
                output="",
                error=f"URL error: {e.reason}",
                target=url,
            )
        except Exception as e:
            return ToolResult(
                success=False,
                output="",
                error=f"Error fetching URL: {e}",
                target=url,
            )
161
+
162
+
163
class WebSearchTool(Tool):
    """Search the web using DuckDuckGo or Brave Search.

    DuckDuckGo is tried through three mechanisms, most reliable first:
    the ddgs / duckduckgo-search libraries, the Instant Answer API, and
    finally scraping the HTML endpoint. Brave requires an API key in the
    BRAVE_SEARCH_API_KEY environment variable.
    """

    name = "WebSearch"
    description = (
        "Search the web for information using DuckDuckGo (free) or Brave Search (with API key). "
        "Returns search results with titles, URLs, and snippets."
    )
    category = "web"
    permission = ToolPermission.ASK

    parameters = [
        ToolParameter(
            name="query",
            description="Search query",
            type="string",
            required=True,
        ),
        ToolParameter(
            name="num_results",
            description="Number of results to return (default: 5, max: 10)",
            type="integer",
            required=False,
            default=5,
        ),
        ToolParameter(
            name="search_engine",
            description="Search engine to use: 'duckduckgo' (default, free) or 'brave' (requires API key)",
            type="string",
            required=False,
            default="duckduckgo",
        ),
    ]

    def execute(
        self,
        query: str,
        num_results: int = 5,
        search_engine: str = "duckduckgo",
    ) -> ToolResult:
        """Search the web and return formatted results.

        Args:
            query: The search query string.
            num_results: Desired number of results; clamped to 1-10.
            search_engine: "duckduckgo" (default) or "brave".
        """
        num_results = min(max(1, num_results), 10)  # Clamp to 1-10

        if search_engine.lower() == "brave":
            return self._search_brave(query, num_results)
        else:
            return self._search_duckduckgo(query, num_results)

    def _search_duckduckgo(self, query: str, num_results: int) -> ToolResult:
        """Search DuckDuckGo: library first, then API, then HTML scraping."""
        results = []

        # Method 1: ddgs library (most reliable) - newer package name.
        try:
            from ddgs import DDGS
            ddgs = DDGS()
            search_results = ddgs.text(query, max_results=num_results)
            for r in search_results:
                results.append({
                    "title": r.get("title", ""),
                    "url": r.get("href", ""),
                    "snippet": r.get("body", ""),
                })
            if results:
                output = self._format_results(query, results)
                return ToolResult(
                    success=True,
                    output=output,
                    target=query,
                    data={"results": results, "query": query, "engine": "duckduckgo"},
                )
        except ImportError:
            # Fall back to the legacy package name.
            try:
                from duckduckgo_search import DDGS
                with DDGS() as ddgs:
                    for r in ddgs.text(query, max_results=num_results):
                        results.append({
                            "title": r.get("title", ""),
                            "url": r.get("href", ""),
                            "snippet": r.get("body", ""),
                        })
                if results:
                    output = self._format_results(query, results)
                    return ToolResult(
                        success=True,
                        output=output,
                        target=query,
                        data={"results": results, "query": query, "engine": "duckduckgo"},
                    )
            except ImportError:
                pass  # Neither library installed
        except Exception:
            pass  # Library failed; try the fallbacks below

        # Method 2: DuckDuckGo Instant Answer API.
        try:
            api_url = f"https://api.duckduckgo.com/?q={urllib.parse.quote(query)}&format=json&no_html=1"
            headers = {
                "User-Agent": "Mozilla/5.0 (compatible; NC1709/1.0)",
            }
            req = urllib.request.Request(api_url, headers=headers)

            with urllib.request.urlopen(req, timeout=10) as response:
                data = json.loads(response.read().decode("utf-8"))

            # Extract the instant answer (abstract), if any.
            if data.get("Abstract"):
                results.append({
                    "title": data.get("Heading", "DuckDuckGo Answer"),
                    "url": data.get("AbstractURL", ""),
                    "snippet": data.get("Abstract", ""),
                    "source": data.get("AbstractSource", ""),
                })

            # Extract related topics.
            for topic in data.get("RelatedTopics", [])[:num_results]:
                if isinstance(topic, dict) and "Text" in topic:
                    results.append({
                        "title": topic.get("Text", "")[:80],
                        "url": topic.get("FirstURL", ""),
                        "snippet": topic.get("Text", ""),
                    })

        except Exception:
            pass  # API unavailable; the HTML fallback below may still work

        # Method 3: HTML scraping fallback.
        if len(results) < num_results:
            try:
                html_results = self._scrape_duckduckgo_html(query, num_results - len(results))
                results.extend(html_results)
            except Exception as e:
                if not results:
                    # All methods failed - provide a helpful error.
                    return ToolResult(
                        success=False,
                        output="",
                        error=(
                            f"DuckDuckGo search failed. For better results, install: pip install duckduckgo-search\n"
                            f"Error: {e}"
                        ),
                        target=query,
                    )

        if not results:
            return ToolResult(
                success=True,
                output=f"No results found for: {query}\n\nTip: Install duckduckgo-search for better results: pip install duckduckgo-search",
                target=query,
                data={"results": [], "query": query},
            )

        output = self._format_results(query, results[:num_results])

        return ToolResult(
            success=True,
            output=output,
            target=query,
            data={
                "results": results[:num_results],
                "query": query,
                "engine": "duckduckgo",
            },
        )

    def _scrape_duckduckgo_html(self, query: str, num_results: int) -> List[Dict]:
        """Scrape results from the DuckDuckGo HTML (lite) endpoint."""
        results = []

        url = f"https://html.duckduckgo.com/html/?q={urllib.parse.quote(query)}"
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        }
        req = urllib.request.Request(url, headers=headers)

        with urllib.request.urlopen(req, timeout=15) as response:
            html = response.read().decode("utf-8", errors="replace")

        # The HTML version marks result links with class="result__a" and
        # snippets with class="result__snippet".
        link_pattern = r'<a[^>]*class="result__a"[^>]*href="([^"]*)"[^>]*>([^<]*)</a>'
        snippet_pattern = r'<a[^>]*class="result__snippet"[^>]*>([^<]*)</a>'

        links = re.findall(link_pattern, html)
        snippets = re.findall(snippet_pattern, html)

        for i, (link_url, title) in enumerate(links[:num_results]):
            snippet = snippets[i] if i < len(snippets) else ""
            # DuckDuckGo wraps target URLs in a redirect with a "uddg" param.
            if "uddg=" in link_url:
                try:
                    link_url = urllib.parse.unquote(link_url.split("uddg=")[1].split("&")[0])
                except Exception:
                    # Was a bare except; keep the wrapped URL on failure.
                    pass
            if link_url and not link_url.startswith("/"):
                results.append({
                    "title": title.strip(),
                    "url": link_url,
                    "snippet": snippet.strip(),
                })

        return results

    def _search_brave(self, query: str, num_results: int) -> ToolResult:
        """Search using the Brave Search API (needs BRAVE_SEARCH_API_KEY)."""
        import os

        api_key = os.environ.get("BRAVE_SEARCH_API_KEY")
        if not api_key:
            return ToolResult(
                success=False,
                output="",
                error=(
                    "Brave Search requires API key. "
                    "Set BRAVE_SEARCH_API_KEY environment variable. "
                    "Get a free API key at: https://brave.com/search/api/"
                ),
                target=query,
            )

        try:
            url = f"https://api.search.brave.com/res/v1/web/search?q={urllib.parse.quote(query)}&count={num_results}"
            headers = {
                "Accept": "application/json",
                "X-Subscription-Token": api_key,
            }
            req = urllib.request.Request(url, headers=headers)

            with urllib.request.urlopen(req, timeout=15) as response:
                data = json.loads(response.read().decode("utf-8"))

            results = []
            web_results = data.get("web", {}).get("results", [])

            for item in web_results[:num_results]:
                results.append({
                    "title": item.get("title", ""),
                    "url": item.get("url", ""),
                    "snippet": item.get("description", ""),
                })

            if not results:
                return ToolResult(
                    success=True,
                    output=f"No results found for: {query}",
                    target=query,
                    data={"results": [], "query": query},
                )

            output = self._format_results(query, results)

            return ToolResult(
                success=True,
                output=output,
                target=query,
                data={
                    "results": results,
                    "query": query,
                    "engine": "brave",
                },
            )

        except urllib.error.HTTPError as e:
            return ToolResult(
                success=False,
                output="",
                error=f"Brave Search API error: HTTP {e.code}",
                target=query,
            )
        except Exception as e:
            return ToolResult(
                success=False,
                output="",
                error=f"Brave Search failed: {e}",
                target=query,
            )

    def _format_results(self, query: str, results: List[Dict]) -> str:
        """Format search results as a human-readable numbered list."""
        output_parts = [
            f"Search results for: {query}",
            "=" * 60,
        ]

        for i, result in enumerate(results, 1):
            output_parts.append(f"\n{i}. {result.get('title', 'No title')}")
            if result.get("url"):
                output_parts.append(f"   URL: {result['url']}")
            if result.get("snippet"):
                # Trim long snippets to 300 characters with an ellipsis.
                snippet = result["snippet"][:300]
                if len(result.get("snippet", "")) > 300:
                    snippet += "..."
                output_parts.append(f"   {snippet}")

        output_parts.append(f"\n{'=' * 60}")
        output_parts.append(f"Found {len(results)} result(s)")

        return "\n".join(output_parts)
462
+
463
+
464
class WebScreenshotTool(Tool):
    """Take a screenshot of a web page (requires playwright)."""

    name = "WebScreenshot"
    description = (
        "Take a screenshot of a web page. Requires playwright to be installed. "
        "Returns the path to the saved screenshot image."
    )
    category = "web"
    permission = ToolPermission.ASK

    parameters = [
        ToolParameter(
            name="url",
            description="The URL to screenshot",
            type="string",
            required=True,
        ),
        ToolParameter(
            name="output_path",
            description="Path to save the screenshot (default: /tmp/screenshot.png)",
            type="string",
            required=False,
            default="/tmp/nc1709_screenshot.png",
        ),
        ToolParameter(
            name="full_page",
            description="Capture full page instead of viewport only",
            type="boolean",
            required=False,
            default=False,
        ),
    ]

    def execute(
        self,
        url: str,
        output_path: str = "/tmp/nc1709_screenshot.png",
        full_page: bool = False,
    ) -> ToolResult:
        """Render *url* in headless Chromium and save a screenshot.

        Args:
            url: Page to capture; "https://" is prepended if no scheme given.
            output_path: Where to write the screenshot image.
            full_page: Capture the entire page instead of just the viewport.
        """
        try:
            from playwright.sync_api import sync_playwright
        except ImportError:
            return ToolResult(
                success=False,
                output="",
                error=(
                    "Playwright not installed. Install with:\n"
                    "  pip install playwright\n"
                    "  playwright install chromium"
                ),
                target=url,
            )

        # Default to HTTPS when the caller omitted the scheme.
        if not url.startswith(("http://", "https://")):
            url = "https://" + url

        try:
            with sync_playwright() as p:
                browser = p.chromium.launch()
                try:
                    page = browser.new_page()
                    page.goto(url, timeout=30000)
                    page.screenshot(path=output_path, full_page=full_page)
                finally:
                    # Release the browser even if navigation or capture fails;
                    # previously it leaked until playwright shut down.
                    browser.close()

            return ToolResult(
                success=True,
                output=f"Screenshot saved to: {output_path}",
                target=url,
                data={
                    "url": url,
                    "output_path": output_path,
                    "full_page": full_page,
                },
            )

        except Exception as e:
            return ToolResult(
                success=False,
                output="",
                error=f"Screenshot failed: {e}",
                target=url,
            )
549
+
550
+
551
def register_web_tools(registry):
    """Register all web tool classes with the given tool registry."""
    for tool_cls in (WebFetchTool, WebSearchTool, WebScreenshotTool):
        registry.register_class(tool_cls)
@@ -0,0 +1,17 @@
1
+ """
2
+ NC1709 AI Agents
3
+ Specialized agents for automated development tasks
4
+ """
5
+
6
+ from .auto_fix import AutoFixAgent, CodeError, Fix, auto_fix_command
7
+ from .test_generator import TestGeneratorAgent, GeneratedTest, generate_tests_command
8
+
9
+ __all__ = [
10
+ 'AutoFixAgent',
11
+ 'CodeError',
12
+ 'Fix',
13
+ 'auto_fix_command',
14
+ 'TestGeneratorAgent',
15
+ 'GeneratedTest',
16
+ 'generate_tests_command',
17
+ ]