nc1709-1.15.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. nc1709/__init__.py +13 -0
  2. nc1709/agent/__init__.py +36 -0
  3. nc1709/agent/core.py +505 -0
  4. nc1709/agent/mcp_bridge.py +245 -0
  5. nc1709/agent/permissions.py +298 -0
  6. nc1709/agent/tools/__init__.py +21 -0
  7. nc1709/agent/tools/base.py +440 -0
  8. nc1709/agent/tools/bash_tool.py +367 -0
  9. nc1709/agent/tools/file_tools.py +454 -0
  10. nc1709/agent/tools/notebook_tools.py +516 -0
  11. nc1709/agent/tools/search_tools.py +322 -0
  12. nc1709/agent/tools/task_tool.py +284 -0
  13. nc1709/agent/tools/web_tools.py +555 -0
  14. nc1709/agents/__init__.py +17 -0
  15. nc1709/agents/auto_fix.py +506 -0
  16. nc1709/agents/test_generator.py +507 -0
  17. nc1709/checkpoints.py +372 -0
  18. nc1709/cli.py +3380 -0
  19. nc1709/cli_ui.py +1080 -0
  20. nc1709/cognitive/__init__.py +149 -0
  21. nc1709/cognitive/anticipation.py +594 -0
  22. nc1709/cognitive/context_engine.py +1046 -0
  23. nc1709/cognitive/council.py +824 -0
  24. nc1709/cognitive/learning.py +761 -0
  25. nc1709/cognitive/router.py +583 -0
  26. nc1709/cognitive/system.py +519 -0
  27. nc1709/config.py +155 -0
  28. nc1709/custom_commands.py +300 -0
  29. nc1709/executor.py +333 -0
  30. nc1709/file_controller.py +354 -0
  31. nc1709/git_integration.py +308 -0
  32. nc1709/github_integration.py +477 -0
  33. nc1709/image_input.py +446 -0
  34. nc1709/linting.py +519 -0
  35. nc1709/llm_adapter.py +667 -0
  36. nc1709/logger.py +192 -0
  37. nc1709/mcp/__init__.py +18 -0
  38. nc1709/mcp/client.py +370 -0
  39. nc1709/mcp/manager.py +407 -0
  40. nc1709/mcp/protocol.py +210 -0
  41. nc1709/mcp/server.py +473 -0
  42. nc1709/memory/__init__.py +20 -0
  43. nc1709/memory/embeddings.py +325 -0
  44. nc1709/memory/indexer.py +474 -0
  45. nc1709/memory/sessions.py +432 -0
  46. nc1709/memory/vector_store.py +451 -0
  47. nc1709/models/__init__.py +86 -0
  48. nc1709/models/detector.py +377 -0
  49. nc1709/models/formats.py +315 -0
  50. nc1709/models/manager.py +438 -0
  51. nc1709/models/registry.py +497 -0
  52. nc1709/performance/__init__.py +343 -0
  53. nc1709/performance/cache.py +705 -0
  54. nc1709/performance/pipeline.py +611 -0
  55. nc1709/performance/tiering.py +543 -0
  56. nc1709/plan_mode.py +362 -0
  57. nc1709/plugins/__init__.py +17 -0
  58. nc1709/plugins/agents/__init__.py +18 -0
  59. nc1709/plugins/agents/django_agent.py +912 -0
  60. nc1709/plugins/agents/docker_agent.py +623 -0
  61. nc1709/plugins/agents/fastapi_agent.py +887 -0
  62. nc1709/plugins/agents/git_agent.py +731 -0
  63. nc1709/plugins/agents/nextjs_agent.py +867 -0
  64. nc1709/plugins/base.py +359 -0
  65. nc1709/plugins/manager.py +411 -0
  66. nc1709/plugins/registry.py +337 -0
  67. nc1709/progress.py +443 -0
  68. nc1709/prompts/__init__.py +22 -0
  69. nc1709/prompts/agent_system.py +180 -0
  70. nc1709/prompts/task_prompts.py +340 -0
  71. nc1709/prompts/unified_prompt.py +133 -0
  72. nc1709/reasoning_engine.py +541 -0
  73. nc1709/remote_client.py +266 -0
  74. nc1709/shell_completions.py +349 -0
  75. nc1709/slash_commands.py +649 -0
  76. nc1709/task_classifier.py +408 -0
  77. nc1709/version_check.py +177 -0
  78. nc1709/web/__init__.py +8 -0
  79. nc1709/web/server.py +950 -0
  80. nc1709/web/templates/index.html +1127 -0
  81. nc1709-1.15.4.dist-info/METADATA +858 -0
  82. nc1709-1.15.4.dist-info/RECORD +86 -0
  83. nc1709-1.15.4.dist-info/WHEEL +5 -0
  84. nc1709-1.15.4.dist-info/entry_points.txt +2 -0
  85. nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
  86. nc1709-1.15.4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,497 @@
+"""
+Model Registry for NC1709
+
+Centralized model specifications. Adding a new model is as simple as
+adding an entry to KNOWN_MODELS or letting the system auto-detect it.
+"""
+
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Any
+from enum import Enum
+
+
+class PromptFormat(Enum):
+    """Supported prompt formats"""
+    CHATML = "chatml"        # <|im_start|>user\n...<|im_end|>
+    LLAMA = "llama"          # [INST] ... [/INST]
+    ALPACA = "alpaca"        # ### Instruction:\n...\n### Response:
+    RAW = "raw"              # No special formatting
+    DEEPSEEK = "deepseek"    # DeepSeek specific format
+    MISTRAL = "mistral"      # Mistral format
+    COMMAND_R = "command_r"  # Cohere Command-R format
+
+
+class ModelCapability(Enum):
+    """Model capabilities"""
+    CODE_GENERATION = "code_generation"
+    CODE_COMPLETION = "code_completion"
+    REASONING = "reasoning"
+    TOOL_USE = "tool_use"
+    FUNCTION_CALLING = "function_calling"
+    VISION = "vision"
+    LONG_CONTEXT = "long_context"
+    FAST_INFERENCE = "fast_inference"
+    MATH = "math"
+    CREATIVE_WRITING = "creative_writing"
+    EMBEDDING = "embedding"
+
+
+@dataclass
+class ModelSpec:
+    """
+    Complete specification for a model.
+
+    This contains everything NC1709 needs to know to use a model effectively.
+    """
+    # Basic info
+    name: str         # Human-readable name
+    ollama_name: str  # Name in Ollama (e.g., "qwen2.5-coder:32b")
+
+    # Context limits
+    context_window: int = 32768    # Max input tokens
+    max_output_tokens: int = 4096  # Max output tokens
+
+    # Prompt formatting
+    prompt_format: PromptFormat = PromptFormat.CHATML
+    system_prompt_supported: bool = True  # Does it support system prompts?
+
+    # Capabilities
+    capabilities: List[ModelCapability] = field(default_factory=list)
+
+    # Performance characteristics
+    tokens_per_second: Optional[float] = None  # Approximate speed
+    memory_gb: Optional[float] = None          # VRAM required
+
+    # Recommended settings
+    default_temperature: float = 0.7
+    recommended_temperature_code: float = 0.3  # Lower for code
+    recommended_temperature_creative: float = 0.9
+
+    # Special features
+    supports_streaming: bool = True
+    supports_json_mode: bool = False  # Structured output
+    supports_vision: bool = False     # Image input
+
+    # Task suitability scores (0-1, higher is better)
+    suitability: Dict[str, float] = field(default_factory=dict)
+
+    # Any special notes or quirks
+    notes: Optional[str] = None
+
+
+# ============================================================================
+# KNOWN MODELS REGISTRY
+# ============================================================================
+# Add new models here. This is the ONLY place you need to update when
+# adding a new model to the system.
+
+KNOWN_MODELS: Dict[str, ModelSpec] = {
+
+    # -------------------------------------------------------------------------
+    # Qwen Models
+    # -------------------------------------------------------------------------
+
+    "qwen2.5-coder:32b": ModelSpec(
+        name="Qwen 2.5 Coder 32B",
+        ollama_name="qwen2.5-coder:32b",
+        context_window=32768,
+        max_output_tokens=8192,
+        prompt_format=PromptFormat.CHATML,
+        capabilities=[
+            ModelCapability.CODE_GENERATION,
+            ModelCapability.CODE_COMPLETION,
+            ModelCapability.REASONING,
+        ],
+        memory_gb=20.0,
+        tokens_per_second=30.0,
+        default_temperature=0.7,
+        recommended_temperature_code=0.3,
+        suitability={
+            "coding": 0.95,
+            "reasoning": 0.80,
+            "general": 0.75,
+            "fast": 0.40,
+            "instant": 0.20,
+        },
+        notes="Excellent for code generation and debugging."
+    ),
+
+    "qwen2.5-coder:7b": ModelSpec(
+        name="Qwen 2.5 Coder 7B",
+        ollama_name="qwen2.5-coder:7b",
+        context_window=32768,
+        max_output_tokens=8192,
+        prompt_format=PromptFormat.CHATML,
+        capabilities=[
+            ModelCapability.CODE_GENERATION,
+            ModelCapability.CODE_COMPLETION,
+            ModelCapability.FAST_INFERENCE,
+        ],
+        memory_gb=5.0,
+        tokens_per_second=80.0,
+        suitability={
+            "coding": 0.75,
+            "reasoning": 0.60,
+            "general": 0.65,
+            "fast": 0.90,
+            "instant": 0.60,
+        },
+        notes="Fast model for quick tasks and drafting."
+    ),
+
+    "qwen2.5:32b": ModelSpec(
+        name="Qwen 2.5 32B",
+        ollama_name="qwen2.5:32b",
+        context_window=32768,
+        max_output_tokens=8192,
+        prompt_format=PromptFormat.CHATML,
+        capabilities=[
+            ModelCapability.REASONING,
+            ModelCapability.TOOL_USE,
+            ModelCapability.CREATIVE_WRITING,
+        ],
+        memory_gb=20.0,
+        tokens_per_second=30.0,
+        suitability={
+            "coding": 0.70,
+            "reasoning": 0.85,
+            "general": 0.90,
+            "tools": 0.85,
+            "fast": 0.40,
+        },
+        notes="Good general-purpose model."
+    ),
+
+    "qwen2.5:7b": ModelSpec(
+        name="Qwen 2.5 7B",
+        ollama_name="qwen2.5:7b",
+        context_window=32768,
+        max_output_tokens=8192,
+        prompt_format=PromptFormat.CHATML,
+        capabilities=[
+            ModelCapability.REASONING,
+            ModelCapability.FAST_INFERENCE,
+        ],
+        memory_gb=5.0,
+        tokens_per_second=80.0,
+        suitability={
+            "coding": 0.60,
+            "reasoning": 0.70,
+            "general": 0.75,
+            "fast": 0.85,
+            "instant": 0.50,
+        },
+        notes="Balanced speed and capability."
+    ),
+
+    "qwen2.5:3b": ModelSpec(
+        name="Qwen 2.5 3B",
+        ollama_name="qwen2.5:3b",
+        context_window=32768,
+        max_output_tokens=4096,
+        prompt_format=PromptFormat.CHATML,
+        capabilities=[
+            ModelCapability.FAST_INFERENCE,
+        ],
+        memory_gb=2.5,
+        tokens_per_second=150.0,
+        suitability={
+            "coding": 0.50,
+            "reasoning": 0.40,
+            "general": 0.55,
+            "fast": 0.95,
+            "instant": 0.95,
+        },
+        notes="Ultra-fast for simple tasks."
+    ),
+
+    # -------------------------------------------------------------------------
+    # DeepSeek Models
+    # -------------------------------------------------------------------------
+
+    "deepseek-r1:latest": ModelSpec(
+        name="DeepSeek R1",
+        ollama_name="deepseek-r1:latest",
+        context_window=65536,
+        max_output_tokens=8192,
+        prompt_format=PromptFormat.DEEPSEEK,
+        capabilities=[
+            ModelCapability.REASONING,
+            ModelCapability.MATH,
+            ModelCapability.LONG_CONTEXT,
+        ],
+        memory_gb=18.0,
+        tokens_per_second=25.0,
+        suitability={
+            "coding": 0.75,
+            "reasoning": 0.95,
+            "general": 0.80,
+            "math": 0.95,
+            "council": 0.90,
+        },
+        notes="Excellent for complex reasoning and math."
+    ),
+
+    "deepseek-coder-v2:latest": ModelSpec(
+        name="DeepSeek Coder V2",
+        ollama_name="deepseek-coder-v2:latest",
+        context_window=128000,
+        max_output_tokens=8192,
+        prompt_format=PromptFormat.DEEPSEEK,
+        capabilities=[
+            ModelCapability.CODE_GENERATION,
+            ModelCapability.CODE_COMPLETION,
+            ModelCapability.LONG_CONTEXT,
+        ],
+        memory_gb=22.0,
+        tokens_per_second=28.0,
+        suitability={
+            "coding": 0.92,
+            "reasoning": 0.85,
+            "general": 0.75,
+        },
+        notes="128K context, great for large codebases."
+    ),
+
+    # -------------------------------------------------------------------------
+    # Llama Models
+    # -------------------------------------------------------------------------
+
+    "llama3.2:latest": ModelSpec(
+        name="Llama 3.2",
+        ollama_name="llama3.2:latest",
+        context_window=128000,
+        max_output_tokens=4096,
+        prompt_format=PromptFormat.LLAMA,
+        capabilities=[
+            ModelCapability.REASONING,
+            ModelCapability.LONG_CONTEXT,
+        ],
+        memory_gb=8.0,
+        tokens_per_second=50.0,
+        suitability={
+            "coding": 0.70,
+            "reasoning": 0.80,
+            "general": 0.85,
+        },
+    ),
+
+    "codellama:34b": ModelSpec(
+        name="Code Llama 34B",
+        ollama_name="codellama:34b",
+        context_window=16384,
+        max_output_tokens=4096,
+        prompt_format=PromptFormat.LLAMA,
+        capabilities=[
+            ModelCapability.CODE_GENERATION,
+            ModelCapability.CODE_COMPLETION,
+        ],
+        memory_gb=20.0,
+        tokens_per_second=25.0,
+        suitability={
+            "coding": 0.88,
+            "reasoning": 0.70,
+            "general": 0.65,
+        },
+    ),
+
+    # -------------------------------------------------------------------------
+    # Mistral Models
+    # -------------------------------------------------------------------------
+
+    "mistral:latest": ModelSpec(
+        name="Mistral 7B",
+        ollama_name="mistral:latest",
+        context_window=32768,
+        max_output_tokens=4096,
+        prompt_format=PromptFormat.MISTRAL,
+        capabilities=[
+            ModelCapability.REASONING,
+            ModelCapability.FAST_INFERENCE,
+        ],
+        memory_gb=5.0,
+        tokens_per_second=70.0,
+        suitability={
+            "coding": 0.70,
+            "reasoning": 0.75,
+            "general": 0.80,
+            "fast": 0.85,
+        },
+    ),
+
+    "mixtral:8x7b": ModelSpec(
+        name="Mixtral 8x7B",
+        ollama_name="mixtral:8x7b",
+        context_window=32768,
+        max_output_tokens=4096,
+        prompt_format=PromptFormat.MISTRAL,
+        capabilities=[
+            ModelCapability.REASONING,
+            ModelCapability.CODE_GENERATION,
+        ],
+        memory_gb=26.0,
+        tokens_per_second=40.0,
+        suitability={
+            "coding": 0.80,
+            "reasoning": 0.85,
+            "general": 0.85,
+        },
+    ),
+
+    # -------------------------------------------------------------------------
+    # Embedding Models
+    # -------------------------------------------------------------------------
+
+    "nomic-embed-text": ModelSpec(
+        name="Nomic Embed Text",
+        ollama_name="nomic-embed-text",
+        context_window=8192,
+        max_output_tokens=0,  # Embedding model
+        prompt_format=PromptFormat.RAW,
+        capabilities=[ModelCapability.EMBEDDING],
+        memory_gb=0.5,
+        tokens_per_second=500.0,
+        suitability={
+            "embedding": 1.0,
+        },
+        notes="Embedding model for semantic search."
+    ),
+
+    "mxbai-embed-large": ModelSpec(
+        name="MxBai Embed Large",
+        ollama_name="mxbai-embed-large",
+        context_window=512,
+        max_output_tokens=0,
+        prompt_format=PromptFormat.RAW,
+        capabilities=[ModelCapability.EMBEDDING],
+        memory_gb=0.7,
+        tokens_per_second=400.0,
+        suitability={
+            "embedding": 0.95,
+        },
+        notes="High-quality embedding model."
+    ),
+}
+
+
+# ============================================================================
+# REGISTRY FUNCTIONS
+# ============================================================================
+
+def get_model_spec(model_name: str) -> Optional[ModelSpec]:
+    """
+    Get the specification for a model.
+
+    Args:
+        model_name: Ollama model name (e.g., "qwen2.5-coder:32b")
+
+    Returns:
+        ModelSpec if found, None otherwise
+    """
+    # Direct lookup
+    if model_name in KNOWN_MODELS:
+        return KNOWN_MODELS[model_name]
+
+    # Fall back to matching the base name without the tag. Compare base
+    # names exactly so that e.g. "qwen2.5" does not match "qwen2.5-coder".
+    base_name = model_name.split(":")[0]
+    for known_name, spec in KNOWN_MODELS.items():
+        if known_name.split(":")[0] == base_name:
+            return spec
+
+    return None
+
+
+def get_all_models() -> Dict[str, ModelSpec]:
+    """Get all known models"""
+    return KNOWN_MODELS.copy()
+
+
+def get_models_with_capability(capability: ModelCapability) -> List[ModelSpec]:
+    """Get all models with a specific capability"""
+    return [
+        spec for spec in KNOWN_MODELS.values()
+        if capability in spec.capabilities
+    ]
+
+
+def get_best_model_for_task(task: str) -> Optional[ModelSpec]:
+    """
+    Get the best model for a specific task based on suitability scores.
+
+    Args:
+        task: Task name (e.g., "coding", "reasoning", "fast")
+
+    Returns:
+        Best ModelSpec for the task, or None if no model has a score for it
+    """
+    best_model = None
+    best_score = 0.0
+
+    for spec in KNOWN_MODELS.values():
+        score = spec.suitability.get(task, 0.0)
+        if score > best_score:
+            best_score = score
+            best_model = spec
+
+    return best_model
+
+
+def register_model(spec: ModelSpec) -> None:
+    """
+    Register a new model at runtime.
+
+    Args:
+        spec: Model specification to register
+    """
+    KNOWN_MODELS[spec.ollama_name] = spec
+
+
+def unregister_model(model_name: str) -> bool:
+    """
+    Unregister a model.
+
+    Args:
+        model_name: Model name to remove
+
+    Returns:
+        True if removed, False if not found
+    """
+    if model_name in KNOWN_MODELS:
+        del KNOWN_MODELS[model_name]
+        return True
+    return False
+
+
+def create_model_spec(
+    ollama_name: str,
+    name: Optional[str] = None,
+    context_window: int = 32768,
+    prompt_format: PromptFormat = PromptFormat.CHATML,
+    capabilities: Optional[List[ModelCapability]] = None,
+    suitability: Optional[Dict[str, float]] = None,
+    **kwargs
+) -> ModelSpec:
+    """
+    Helper to create a ModelSpec with sensible defaults.
+
+    Args:
+        ollama_name: Name in Ollama
+        name: Human-readable name (defaults to ollama_name)
+        context_window: Context window size
+        prompt_format: Prompt format to use
+        capabilities: List of capabilities
+        suitability: Suitability scores
+        **kwargs: Additional ModelSpec fields
+
+    Returns:
+        New ModelSpec
+    """
+    return ModelSpec(
+        name=name or ollama_name,
+        ollama_name=ollama_name,
+        context_window=context_window,
+        prompt_format=prompt_format,
+        capabilities=capabilities or [],
+        suitability=suitability or {"general": 0.5},
+        **kwargs
+    )
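
For orientation, the snippet below sketches the runtime-registration path the module docstring advertises. It is an illustration, not part of the package: it assumes the file above is importable as nc1709.models.registry (its path in this wheel), and the model name it registers is hypothetical.

from nc1709.models.registry import (
    ModelCapability,
    create_model_spec,
    get_model_spec,
    register_model,
)

# Build a spec for a hypothetical local model; any field not passed here
# falls back to the defaults declared on ModelSpec.
spec = create_model_spec(
    ollama_name="my-coder:13b",  # hypothetical Ollama tag
    name="My Coder 13B",
    capabilities=[ModelCapability.CODE_GENERATION],
    suitability={"coding": 0.85, "general": 0.60},
)
register_model(spec)

# Registered specs resolve exactly like the built-in entries.
assert get_model_spec("my-coder:13b") is spec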
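
Selection works the same way over built-in and registered entries: get_best_model_for_task scans the suitability scores, while get_models_with_capability filters on declared capabilities. A minimal sketch, under the same import-path assumption:

from nc1709.models.registry import (
    ModelCapability,
    get_best_model_for_task,
    get_models_with_capability,
)

# With the table as shipped, "coding" resolves to Qwen 2.5 Coder 32B (0.95).
best = get_best_model_for_task("coding")
if best is not None:
    print(best.name, best.ollama_name)

# Capability filtering ignores suitability scores entirely.
for m in get_models_with_capability(ModelCapability.LONG_CONTEXT):
    print(m.ollama_name)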