nc1709-1.15.4-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. nc1709/__init__.py +13 -0
  2. nc1709/agent/__init__.py +36 -0
  3. nc1709/agent/core.py +505 -0
  4. nc1709/agent/mcp_bridge.py +245 -0
  5. nc1709/agent/permissions.py +298 -0
  6. nc1709/agent/tools/__init__.py +21 -0
  7. nc1709/agent/tools/base.py +440 -0
  8. nc1709/agent/tools/bash_tool.py +367 -0
  9. nc1709/agent/tools/file_tools.py +454 -0
  10. nc1709/agent/tools/notebook_tools.py +516 -0
  11. nc1709/agent/tools/search_tools.py +322 -0
  12. nc1709/agent/tools/task_tool.py +284 -0
  13. nc1709/agent/tools/web_tools.py +555 -0
  14. nc1709/agents/__init__.py +17 -0
  15. nc1709/agents/auto_fix.py +506 -0
  16. nc1709/agents/test_generator.py +507 -0
  17. nc1709/checkpoints.py +372 -0
  18. nc1709/cli.py +3380 -0
  19. nc1709/cli_ui.py +1080 -0
  20. nc1709/cognitive/__init__.py +149 -0
  21. nc1709/cognitive/anticipation.py +594 -0
  22. nc1709/cognitive/context_engine.py +1046 -0
  23. nc1709/cognitive/council.py +824 -0
  24. nc1709/cognitive/learning.py +761 -0
  25. nc1709/cognitive/router.py +583 -0
  26. nc1709/cognitive/system.py +519 -0
  27. nc1709/config.py +155 -0
  28. nc1709/custom_commands.py +300 -0
  29. nc1709/executor.py +333 -0
  30. nc1709/file_controller.py +354 -0
  31. nc1709/git_integration.py +308 -0
  32. nc1709/github_integration.py +477 -0
  33. nc1709/image_input.py +446 -0
  34. nc1709/linting.py +519 -0
  35. nc1709/llm_adapter.py +667 -0
  36. nc1709/logger.py +192 -0
  37. nc1709/mcp/__init__.py +18 -0
  38. nc1709/mcp/client.py +370 -0
  39. nc1709/mcp/manager.py +407 -0
  40. nc1709/mcp/protocol.py +210 -0
  41. nc1709/mcp/server.py +473 -0
  42. nc1709/memory/__init__.py +20 -0
  43. nc1709/memory/embeddings.py +325 -0
  44. nc1709/memory/indexer.py +474 -0
  45. nc1709/memory/sessions.py +432 -0
  46. nc1709/memory/vector_store.py +451 -0
  47. nc1709/models/__init__.py +86 -0
  48. nc1709/models/detector.py +377 -0
  49. nc1709/models/formats.py +315 -0
  50. nc1709/models/manager.py +438 -0
  51. nc1709/models/registry.py +497 -0
  52. nc1709/performance/__init__.py +343 -0
  53. nc1709/performance/cache.py +705 -0
  54. nc1709/performance/pipeline.py +611 -0
  55. nc1709/performance/tiering.py +543 -0
  56. nc1709/plan_mode.py +362 -0
  57. nc1709/plugins/__init__.py +17 -0
  58. nc1709/plugins/agents/__init__.py +18 -0
  59. nc1709/plugins/agents/django_agent.py +912 -0
  60. nc1709/plugins/agents/docker_agent.py +623 -0
  61. nc1709/plugins/agents/fastapi_agent.py +887 -0
  62. nc1709/plugins/agents/git_agent.py +731 -0
  63. nc1709/plugins/agents/nextjs_agent.py +867 -0
  64. nc1709/plugins/base.py +359 -0
  65. nc1709/plugins/manager.py +411 -0
  66. nc1709/plugins/registry.py +337 -0
  67. nc1709/progress.py +443 -0
  68. nc1709/prompts/__init__.py +22 -0
  69. nc1709/prompts/agent_system.py +180 -0
  70. nc1709/prompts/task_prompts.py +340 -0
  71. nc1709/prompts/unified_prompt.py +133 -0
  72. nc1709/reasoning_engine.py +541 -0
  73. nc1709/remote_client.py +266 -0
  74. nc1709/shell_completions.py +349 -0
  75. nc1709/slash_commands.py +649 -0
  76. nc1709/task_classifier.py +408 -0
  77. nc1709/version_check.py +177 -0
  78. nc1709/web/__init__.py +8 -0
  79. nc1709/web/server.py +950 -0
  80. nc1709/web/templates/index.html +1127 -0
  81. nc1709-1.15.4.dist-info/METADATA +858 -0
  82. nc1709-1.15.4.dist-info/RECORD +86 -0
  83. nc1709-1.15.4.dist-info/WHEEL +5 -0
  84. nc1709-1.15.4.dist-info/entry_points.txt +2 -0
  85. nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
  86. nc1709-1.15.4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,474 @@
+"""
+Project Indexer for NC1709
+Indexes project files for semantic search
+"""
+import os
+import hashlib
+import json
+from pathlib import Path
+from typing import List, Dict, Any, Optional, Set
+from datetime import datetime
+from dataclasses import dataclass, field
+
+from .vector_store import VectorStore
+from .embeddings import CodeChunker
+
+
+@dataclass
+class IndexedFile:
+    """Represents an indexed file"""
+    path: str
+    hash: str
+    language: str
+    chunk_count: int
+    indexed_at: str = field(default_factory=lambda: datetime.now().isoformat())
+    size: int = 0
+    line_count: int = 0
+
+
+class ProjectIndexer:
+    """Indexes project files for semantic code search"""
+
+    # Supported file extensions and their languages
+    SUPPORTED_EXTENSIONS = {
+        '.py': 'python',
+        '.js': 'javascript',
+        '.jsx': 'javascript',
+        '.ts': 'typescript',
+        '.tsx': 'typescript',
+        '.go': 'go',
+        '.rs': 'rust',
+        '.java': 'java',
+        '.c': 'c',
+        '.cpp': 'cpp',
+        '.h': 'c',
+        '.hpp': 'cpp',
+        '.rb': 'ruby',
+        '.php': 'php',
+        '.swift': 'swift',
+        '.kt': 'kotlin',
+        '.scala': 'scala',
+        '.md': 'markdown',
+        '.txt': 'text',
+        '.json': 'json',
+        '.yaml': 'yaml',
+        '.yml': 'yaml',
+        '.toml': 'toml',
+        '.xml': 'xml',
+        '.html': 'html',
+        '.css': 'css',
+        '.scss': 'scss',
+        '.sql': 'sql',
+        '.sh': 'shell',
+        '.bash': 'shell',
+    }
+
+    # Default ignore patterns
+    DEFAULT_IGNORE = {
+        '.git', '.svn', '.hg',
+        'node_modules', '__pycache__', '.pytest_cache',
+        'venv', '.venv', 'env', '.env',
+        'dist', 'build', 'target',
+        '.idea', '.vscode',
+        '*.pyc', '*.pyo', '*.so', '*.dylib',
+        '*.min.js', '*.min.css',
+        '.DS_Store', 'Thumbs.db'
+    }
+
+    def __init__(
+        self,
+        project_path: str,
+        vector_store: Optional[VectorStore] = None,
+        index_path: Optional[str] = None
+    ):
+        """Initialize the project indexer
+
+        Args:
+            project_path: Path to the project root
+            vector_store: VectorStore instance
+            index_path: Path to store index metadata
+        """
+        self.project_path = Path(project_path).resolve()
+        self.vector_store = vector_store or VectorStore()
+        self.chunker = CodeChunker()
+
+        # Index metadata storage
+        if index_path:
+            self.index_path = Path(index_path)
+        else:
+            self.index_path = self.project_path / ".nc1709_index"
+
+        self.index_path.mkdir(parents=True, exist_ok=True)
+        self.metadata_file = self.index_path / "index_metadata.json"
+
+        # Load existing index metadata
+        self.indexed_files: Dict[str, IndexedFile] = {}
+        self._load_metadata()
+
+        # Custom ignore patterns
+        self.ignore_patterns: Set[str] = set(self.DEFAULT_IGNORE)
+        self._load_gitignore()
+
+    def index_project(
+        self,
+        force: bool = False,
+        show_progress: bool = True
+    ) -> Dict[str, Any]:
+        """Index all files in the project
+
+        Args:
+            force: Force re-index of all files
+            show_progress: Show progress information
+
+        Returns:
+            Index statistics
+        """
+        stats = {
+            "files_scanned": 0,
+            "files_indexed": 0,
+            "files_skipped": 0,
+            "files_unchanged": 0,
+            "chunks_created": 0,
+            "errors": []
+        }
+
+        if show_progress:
+            print(f"Indexing project: {self.project_path}")
+
+        # Find all files
+        files_to_index = self._find_indexable_files()
+        stats["files_scanned"] = len(files_to_index)
+
+        if show_progress:
+            print(f"Found {len(files_to_index)} indexable files")
+
+        # Process each file
+        for file_path in files_to_index:
+            try:
+                result = self._index_file(file_path, force=force)
+
+                if result == "indexed":
+                    stats["files_indexed"] += 1
+                    if show_progress:
+                        print(f"  Indexed: {file_path.relative_to(self.project_path)}")
+                elif result == "unchanged":
+                    stats["files_unchanged"] += 1
+                else:
+                    stats["files_skipped"] += 1
+
+            except Exception as e:
+                stats["errors"].append({
+                    "file": str(file_path),
+                    "error": str(e)
+                })
+                if show_progress:
+                    print(f"  Error indexing {file_path}: {e}")
+
+        # Count total chunks
+        stats["chunks_created"] = self.vector_store.count("code")
+
+        # Save metadata
+        self._save_metadata()
+
+        if show_progress:
+            print(f"\nIndexing complete:")
+            print(f"  Files indexed: {stats['files_indexed']}")
+            print(f"  Files unchanged: {stats['files_unchanged']}")
+            print(f"  Total chunks: {stats['chunks_created']}")
+            if stats["errors"]:
+                print(f"  Errors: {len(stats['errors'])}")
+
+        return stats
+
+    def index_file(self, file_path: str, force: bool = False) -> Optional[IndexedFile]:
+        """Index a single file
+
+        Args:
+            file_path: Path to the file
+            force: Force re-index even if unchanged
+
+        Returns:
+            IndexedFile or None if skipped
+        """
+        path = Path(file_path).resolve()
+        result = self._index_file(path, force=force)
+
+        if result == "indexed":
+            self._save_metadata()
+            return self.indexed_files.get(str(path))
+
+        return None
+
+    def _index_file(self, file_path: Path, force: bool = False) -> str:
+        """Internal method to index a file
+
+        Returns:
+            'indexed', 'unchanged', or 'skipped'
+        """
+        path_str = str(file_path)
+
+        # Check if file is supported
+        ext = file_path.suffix.lower()
+        if ext not in self.SUPPORTED_EXTENSIONS:
+            return "skipped"
+
+        # Calculate file hash
+        file_hash = self._get_file_hash(file_path)
+
+        # Check if file has changed
+        if not force and path_str in self.indexed_files:
+            if self.indexed_files[path_str].hash == file_hash:
+                return "unchanged"
+
+        # Read file content
+        try:
+            content = file_path.read_text(encoding='utf-8')
+        except UnicodeDecodeError:
+            try:
+                content = file_path.read_text(encoding='latin-1')
+            except Exception:
+                return "skipped"
+
+        # Get language
+        language = self.SUPPORTED_EXTENSIONS[ext]
+
+        # Remove old chunks if re-indexing
+        if path_str in self.indexed_files:
+            self._remove_file_chunks(path_str)
+
+        # Chunk the file
+        chunks = self.chunker.chunk_code(content, language)
+
+        if not chunks:
+            return "skipped"
+
+        # Prepare entries for batch insert
+        entries = []
+        relative_path = str(file_path.relative_to(self.project_path))
+
+        for i, chunk in enumerate(chunks):
+            entries.append({
+                "content": chunk["content"],
+                "metadata": {
+                    "file_path": relative_path,
+                    "absolute_path": path_str,
+                    "language": language,
+                    "chunk_index": i,
+                    "start_line": chunk["start_line"],
+                    "end_line": chunk["end_line"],
+                    "project": str(self.project_path)
+                }
+            })
+
+        # Add to vector store
+        self.vector_store.add_batch(entries, entry_type="code")
+
+        # Update metadata
+        self.indexed_files[path_str] = IndexedFile(
+            path=path_str,
+            hash=file_hash,
+            language=language,
+            chunk_count=len(chunks),
+            size=file_path.stat().st_size,
+            line_count=len(content.splitlines())
+        )
+
+        return "indexed"
+
+    def search(
+        self,
+        query: str,
+        n_results: int = 5,
+        language: Optional[str] = None,
+        file_pattern: Optional[str] = None
+    ) -> List[Dict[str, Any]]:
+        """Search indexed code
+
+        Args:
+            query: Natural language query
+            n_results: Maximum results
+            language: Filter by programming language
+            file_pattern: Filter by file path pattern
+
+        Returns:
+            List of matching code chunks with metadata
+        """
+        # Build metadata filter
+        filter_metadata = {}
+        if language:
+            filter_metadata["language"] = language
+
+        # Search vector store
+        results = self.vector_store.search(
+            query=query,
+            entry_type="code",
+            n_results=n_results * 2,  # Get more, then filter
+            filter_metadata=filter_metadata if filter_metadata else None
+        )
+
+        # Filter by file pattern if specified
+        if file_pattern:
+            import fnmatch
+            results = [
+                r for r in results
+                if fnmatch.fnmatch(r["metadata"].get("file_path", ""), file_pattern)
+            ]
+
+        # Limit and enhance results
+        enhanced_results = []
+        for result in results[:n_results]:
+            enhanced = {
+                "content": result["content"],
+                "file_path": result["metadata"].get("file_path"),
+                "language": result["metadata"].get("language"),
+                "start_line": result["metadata"].get("start_line"),
+                "end_line": result["metadata"].get("end_line"),
+                "similarity": result["similarity"],
+                "location": f"{result['metadata'].get('file_path')}:{result['metadata'].get('start_line')}-{result['metadata'].get('end_line')}"
+            }
+            enhanced_results.append(enhanced)
+
+        return enhanced_results
+
+    def get_project_summary(self) -> Dict[str, Any]:
+        """Get summary of indexed project
+
+        Returns:
+            Project statistics
+        """
+        # Count by language
+        language_counts = {}
+        total_lines = 0
+        total_size = 0
+
+        for indexed_file in self.indexed_files.values():
+            lang = indexed_file.language
+            language_counts[lang] = language_counts.get(lang, 0) + 1
+            total_lines += indexed_file.line_count
+            total_size += indexed_file.size
+
+        return {
+            "project_path": str(self.project_path),
+            "total_files": len(self.indexed_files),
+            "total_chunks": self.vector_store.count("code"),
+            "total_lines": total_lines,
+            "total_size_bytes": total_size,
+            "languages": language_counts,
+            "last_indexed": max(
+                (f.indexed_at for f in self.indexed_files.values()),
+                default=None
+            )
+        }
+
+    def _find_indexable_files(self) -> List[Path]:
+        """Find all files that can be indexed
+
+        Returns:
+            List of file paths
+        """
+        files = []
+
+        for root, dirs, filenames in os.walk(self.project_path):
+            # Filter out ignored directories
+            dirs[:] = [d for d in dirs if not self._should_ignore(d)]
+
+            for filename in filenames:
+                if self._should_ignore(filename):
+                    continue
+
+                file_path = Path(root) / filename
+                ext = file_path.suffix.lower()
+
+                if ext in self.SUPPORTED_EXTENSIONS:
+                    files.append(file_path)
+
+        return files
+
+    def _should_ignore(self, name: str) -> bool:
+        """Check if a file/directory should be ignored
+
+        Args:
+            name: File or directory name
+
+        Returns:
+            True if should be ignored
+        """
+        import fnmatch
+
+        for pattern in self.ignore_patterns:
+            if fnmatch.fnmatch(name, pattern):
+                return True
+            if name == pattern:
+                return True
+
+        return False
+
+    def _get_file_hash(self, file_path: Path) -> str:
+        """Calculate file hash for change detection
+
+        Args:
+            file_path: Path to file
+
+        Returns:
+            Hash string
+        """
+        hasher = hashlib.md5()
+        hasher.update(str(file_path.stat().st_mtime).encode())
+        hasher.update(str(file_path.stat().st_size).encode())
+        return hasher.hexdigest()
+
+    def _remove_file_chunks(self, file_path: str):
+        """Remove all chunks for a file
+
+        Args:
+            file_path: Absolute file path
+        """
+        # This is a simplified implementation
+        # In production, you'd want to track chunk IDs per file
+        pass
+
+    def _load_gitignore(self):
+        """Load patterns from .gitignore"""
+        gitignore_path = self.project_path / ".gitignore"
+
+        if gitignore_path.exists():
+            try:
+                content = gitignore_path.read_text()
+                for line in content.splitlines():
+                    line = line.strip()
+                    if line and not line.startswith('#'):
+                        self.ignore_patterns.add(line)
+            except Exception:
+                pass
+
+    def _load_metadata(self):
+        """Load index metadata from disk"""
+        if self.metadata_file.exists():
+            try:
+                data = json.loads(self.metadata_file.read_text())
+                for path, file_data in data.get("files", {}).items():
+                    self.indexed_files[path] = IndexedFile(**file_data)
+            except Exception:
+                pass
+
+    def _save_metadata(self):
+        """Save index metadata to disk"""
+        data = {
+            "version": "1.0",
+            "project_path": str(self.project_path),
+            "updated_at": datetime.now().isoformat(),
+            "files": {
+                path: {
+                    "path": f.path,
+                    "hash": f.hash,
+                    "language": f.language,
+                    "chunk_count": f.chunk_count,
+                    "indexed_at": f.indexed_at,
+                    "size": f.size,
+                    "line_count": f.line_count
+                }
+                for path, f in self.indexed_files.items()
+            }
+        }
+
+        self.metadata_file.write_text(json.dumps(data, indent=2))
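
For orientation, a minimal usage sketch of the ProjectIndexer API added in this file (a hypothetical driver script, not part of the package; it assumes the wheel is installed and that the default VectorStore and CodeChunker construct without extra configuration):

    from nc1709.memory.indexer import ProjectIndexer

    # Build (or incrementally refresh) the index for the current project.
    indexer = ProjectIndexer(project_path=".")
    stats = indexer.index_project(show_progress=False)
    print(f"Indexed {stats['files_indexed']} files into {stats['chunks_created']} chunks")

    # Semantic search over the indexed code, restricted to Python files.
    for hit in indexer.search("load ignore patterns from gitignore", n_results=3, language="python"):
        print(f"{hit['location']}  similarity={hit['similarity']:.2f}")

    # Aggregate statistics derived from the saved index metadata.
    print(indexer.get_project_summary()["languages"])

Note that _get_file_hash derives its digest from the file's mtime and size rather than its contents, so a file whose timestamp is touched will be re-indexed even if unchanged, while an edit that preserves both mtime and size would go undetected.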