nc1709-1.15.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. nc1709/__init__.py +13 -0
  2. nc1709/agent/__init__.py +36 -0
  3. nc1709/agent/core.py +505 -0
  4. nc1709/agent/mcp_bridge.py +245 -0
  5. nc1709/agent/permissions.py +298 -0
  6. nc1709/agent/tools/__init__.py +21 -0
  7. nc1709/agent/tools/base.py +440 -0
  8. nc1709/agent/tools/bash_tool.py +367 -0
  9. nc1709/agent/tools/file_tools.py +454 -0
  10. nc1709/agent/tools/notebook_tools.py +516 -0
  11. nc1709/agent/tools/search_tools.py +322 -0
  12. nc1709/agent/tools/task_tool.py +284 -0
  13. nc1709/agent/tools/web_tools.py +555 -0
  14. nc1709/agents/__init__.py +17 -0
  15. nc1709/agents/auto_fix.py +506 -0
  16. nc1709/agents/test_generator.py +507 -0
  17. nc1709/checkpoints.py +372 -0
  18. nc1709/cli.py +3380 -0
  19. nc1709/cli_ui.py +1080 -0
  20. nc1709/cognitive/__init__.py +149 -0
  21. nc1709/cognitive/anticipation.py +594 -0
  22. nc1709/cognitive/context_engine.py +1046 -0
  23. nc1709/cognitive/council.py +824 -0
  24. nc1709/cognitive/learning.py +761 -0
  25. nc1709/cognitive/router.py +583 -0
  26. nc1709/cognitive/system.py +519 -0
  27. nc1709/config.py +155 -0
  28. nc1709/custom_commands.py +300 -0
  29. nc1709/executor.py +333 -0
  30. nc1709/file_controller.py +354 -0
  31. nc1709/git_integration.py +308 -0
  32. nc1709/github_integration.py +477 -0
  33. nc1709/image_input.py +446 -0
  34. nc1709/linting.py +519 -0
  35. nc1709/llm_adapter.py +667 -0
  36. nc1709/logger.py +192 -0
  37. nc1709/mcp/__init__.py +18 -0
  38. nc1709/mcp/client.py +370 -0
  39. nc1709/mcp/manager.py +407 -0
  40. nc1709/mcp/protocol.py +210 -0
  41. nc1709/mcp/server.py +473 -0
  42. nc1709/memory/__init__.py +20 -0
  43. nc1709/memory/embeddings.py +325 -0
  44. nc1709/memory/indexer.py +474 -0
  45. nc1709/memory/sessions.py +432 -0
  46. nc1709/memory/vector_store.py +451 -0
  47. nc1709/models/__init__.py +86 -0
  48. nc1709/models/detector.py +377 -0
  49. nc1709/models/formats.py +315 -0
  50. nc1709/models/manager.py +438 -0
  51. nc1709/models/registry.py +497 -0
  52. nc1709/performance/__init__.py +343 -0
  53. nc1709/performance/cache.py +705 -0
  54. nc1709/performance/pipeline.py +611 -0
  55. nc1709/performance/tiering.py +543 -0
  56. nc1709/plan_mode.py +362 -0
  57. nc1709/plugins/__init__.py +17 -0
  58. nc1709/plugins/agents/__init__.py +18 -0
  59. nc1709/plugins/agents/django_agent.py +912 -0
  60. nc1709/plugins/agents/docker_agent.py +623 -0
  61. nc1709/plugins/agents/fastapi_agent.py +887 -0
  62. nc1709/plugins/agents/git_agent.py +731 -0
  63. nc1709/plugins/agents/nextjs_agent.py +867 -0
  64. nc1709/plugins/base.py +359 -0
  65. nc1709/plugins/manager.py +411 -0
  66. nc1709/plugins/registry.py +337 -0
  67. nc1709/progress.py +443 -0
  68. nc1709/prompts/__init__.py +22 -0
  69. nc1709/prompts/agent_system.py +180 -0
  70. nc1709/prompts/task_prompts.py +340 -0
  71. nc1709/prompts/unified_prompt.py +133 -0
  72. nc1709/reasoning_engine.py +541 -0
  73. nc1709/remote_client.py +266 -0
  74. nc1709/shell_completions.py +349 -0
  75. nc1709/slash_commands.py +649 -0
  76. nc1709/task_classifier.py +408 -0
  77. nc1709/version_check.py +177 -0
  78. nc1709/web/__init__.py +8 -0
  79. nc1709/web/server.py +950 -0
  80. nc1709/web/templates/index.html +1127 -0
  81. nc1709-1.15.4.dist-info/METADATA +858 -0
  82. nc1709-1.15.4.dist-info/RECORD +86 -0
  83. nc1709-1.15.4.dist-info/WHEEL +5 -0
  84. nc1709-1.15.4.dist-info/entry_points.txt +2 -0
  85. nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
  86. nc1709-1.15.4.dist-info/top_level.txt +1 -0
nc1709/models/manager.py
@@ -0,0 +1,438 @@
+ """
+ High-level model management for NC1709.
+
+ This is the main interface that the rest of NC1709 uses to interact
+ with models. It handles model selection, prompt formatting, and
+ configuration management.
+ """
+
+ from typing import Optional, List, Dict, Any
+ from pathlib import Path
+ import json
+
+ from .registry import (
+     ModelSpec, ModelCapability, PromptFormat,
+     KNOWN_MODELS, get_model_spec, get_best_model_for_task,
+     register_model, create_model_spec
+ )
+ from .formats import PromptFormatter, Message
+ from .detector import ModelDetector
+
+
+ class ModelManager:
+     """
+     High-level interface for model management.
+
+     Provides:
+     - Easy model lookup and selection
+     - Automatic prompt formatting
+     - Model recommendation
+     - Registry management
+
+     Example:
+         manager = ModelManager(config)
+         spec = manager.get_model_for_task("coding")
+         prompt = manager.format_prompt(messages, spec.ollama_name)
+     """
+
+     def __init__(
+         self,
+         config: Optional[Any] = None,
+         ollama_url: str = "http://localhost:11434"
+     ):
+         """
+         Initialize the model manager.
+
+         Args:
+             config: Configuration object with model settings
+             ollama_url: Ollama API URL
+         """
+         self.config = config
+         self.detector = ModelDetector(ollama_url)
+         self.formatter = PromptFormatter()
+         self._initialized = False
+         self._model_assignments: Dict[str, str] = {}
+
+         # Load saved model assignments
+         self._load_assignments()
+
+     async def initialize(self) -> None:
+         """Initialize the model manager (async)"""
+         if self._initialized:
+             return
+
+         # Sync with Ollama to detect new models
+         try:
+             await self.detector.sync_with_ollama()
+         except Exception:
+             pass  # Ollama might not be running
+
+         self._initialized = True
+
+     def initialize_sync(self) -> None:
+         """Initialize the model manager (sync)"""
+         if self._initialized:
+             return
+
+         try:
+             self.detector.sync_with_ollama_sync()
+         except Exception:
+             pass
+
+         self._initialized = True
+
+     def get_model_for_task(self, task: str) -> ModelSpec:
+         """
+         Get the configured model for a task.
+
+         Args:
+             task: Task name (e.g., "coding", "reasoning", "fast", "instant")
+
+         Returns:
+             ModelSpec for the configured model
+         """
+         # Check saved assignments first
+         if task in self._model_assignments:
+             model_name = self._model_assignments[task]
+             spec = get_model_spec(model_name)
+             if spec:
+                 return spec
+
+         # Check config
+         if self.config:
+             model_name = None
+             if hasattr(self.config, 'get'):
+                 model_name = self.config.get(f"models.{task}")
+             elif isinstance(self.config, dict):
+                 model_name = self.config.get("models", {}).get(task)
+
+             if model_name:
+                 spec = get_model_spec(model_name)
+                 if spec:
+                     return spec
+                 # Unknown model - create a basic spec
+                 spec = create_model_spec(model_name)
+                 register_model(spec)
+                 return spec
+
+         # Fallback: get best model for task from registry
+         best = get_best_model_for_task(task)
+         if best:
+             return best
+
+         # Ultimate fallback
+         return get_model_spec("qwen2.5:32b") or list(KNOWN_MODELS.values())[0]
+
+     def set_model_for_task(self, task: str, model_name: str) -> bool:
+         """
+         Set the model for a task.
+
+         Args:
+             task: Task name
+             model_name: Model to use
+
+         Returns:
+             True if successful
+         """
+         # Verify model exists or can be detected
+         spec = get_model_spec(model_name)
+         if not spec:
+             # Try to auto-detect
+             try:
+                 spec = self.detector.detect_model_spec_sync(model_name)
+             except Exception:
+                 # Create minimal spec
+                 spec = create_model_spec(model_name)
+             register_model(spec)
+
+         self._model_assignments[task] = model_name
+         self._save_assignments()
+         return True
+
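Taken together, the lookup order is: saved assignments first, then config, then the registry's best match, then a hard-coded fallback. A minimal usage sketch of that flow (assuming the package is installed and a local Ollama is reachable; the model name is the one that appears in the fallback above):

    from nc1709.models.manager import ModelManager

    manager = ModelManager()
    manager.initialize_sync()

    # Pin a model to a task; persisted under ~/.nc1709/model_assignments.json
    manager.set_model_for_task("coding", "qwen2.5:32b")

    # Resolves via assignments -> config -> registry -> fallback
    spec = manager.get_model_for_task("coding")
    print(spec.ollama_name, spec.context_window)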
+     def format_prompt(
+         self,
+         messages: List[Dict[str, str]],
+         model_name: str,
+         add_generation_prompt: bool = True
+     ) -> str:
+         """
+         Format messages for a specific model.
+
+         Args:
+             messages: List of {"role": "...", "content": "..."} dicts
+             model_name: Model to format for
+             add_generation_prompt: Whether to add assistant prompt at end
+
+         Returns:
+             Formatted prompt string
+         """
+         spec = get_model_spec(model_name)
+         prompt_format = spec.prompt_format if spec else PromptFormat.CHATML
+
+         return self.formatter.format_from_dicts(
+             messages, prompt_format, add_generation_prompt
+         )
+
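A sketch of the expected input shape, continuing from the manager above. The exact template is delegated to PromptFormatter in nc1709/models/formats.py; the commented output assumes the conventional ChatML delimiters, which is this method's fallback format:

    messages = [
        {"role": "system", "content": "You are a coding assistant."},
        {"role": "user", "content": "Write a hello-world in Python."},
    ]
    prompt = manager.format_prompt(messages, "qwen2.5:32b")

    # With add_generation_prompt=True, roughly:
    #   <|im_start|>system
    #   You are a coding assistant.<|im_end|>
    #   <|im_start|>user
    #   Write a hello-world in Python.<|im_end|>
    #   <|im_start|>assistant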
+     def get_recommended_settings(
+         self,
+         model_name: str,
+         task: str = "general"
+     ) -> Dict[str, Any]:
+         """
+         Get recommended settings for a model and task.
+
+         Args:
+             model_name: Model name
+             task: Task type
+
+         Returns:
+             Dict with temperature, max_tokens, etc.
+         """
+         spec = get_model_spec(model_name)
+
+         if not spec:
+             return {
+                 "temperature": 0.7,
+                 "max_tokens": 4096,
+                 "context_window": 32768,
+             }
+
+         # Determine temperature based on task
+         if task in ["coding", "code_generation", "code"]:
+             temperature = spec.recommended_temperature_code
+         elif task in ["creative", "writing"]:
+             temperature = spec.recommended_temperature_creative
+         else:
+             temperature = spec.default_temperature
+
+         return {
+             "temperature": temperature,
+             "max_tokens": spec.max_output_tokens,
+             "context_window": spec.context_window,
+         }
+
+     def recommend_model(
+         self,
+         task: str,
+         prefer_fast: bool = False,
+         min_context: int = 0
+     ) -> Optional[ModelSpec]:
+         """
+         Recommend the best model for a task.
+
+         Args:
+             task: Task type
+             prefer_fast: Prioritize speed over quality
+             min_context: Minimum context window required
+
+         Returns:
+             Recommended ModelSpec
+         """
+         candidates = []
+
+         for spec in KNOWN_MODELS.values():
+             # Filter by context requirement
+             if spec.context_window < min_context:
+                 continue
+
+             # Skip embedding models for non-embedding tasks
+             if task != "embedding" and ModelCapability.EMBEDDING in spec.capabilities:
+                 continue
+
+             # Get suitability score
+             score = spec.suitability.get(task, 0.5)
+
+             # Adjust for speed preference
+             if prefer_fast and ModelCapability.FAST_INFERENCE in spec.capabilities:
+                 score *= 1.3
+
+             candidates.append((spec, score))
+
+         if not candidates:
+             return None
+
+         # Return highest scoring
+         candidates.sort(key=lambda x: x[1], reverse=True)
+         return candidates[0][0]
+
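The ranking is one multiplicative adjustment on the suitability score: with prefer_fast=True, a model rated 0.8 for a task that also has FAST_INFERENCE scores 0.8 × 1.3 = 1.04, enough to outrank a slower model rated 1.0. A sketch of a typical call (results depend on what is in the local registry):

    spec = manager.recommend_model("coding", prefer_fast=True, min_context=16384)
    if spec is not None:
        print(f"Recommended: {spec.ollama_name} ({spec.context_window:,} tokens)")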
+     def list_available_models(self) -> List[Dict[str, Any]]:
+         """List all available models with their info"""
+         return [
+             {
+                 "name": spec.name,
+                 "ollama_name": spec.ollama_name,
+                 "context_window": spec.context_window,
+                 "capabilities": [c.value for c in spec.capabilities],
+                 "suitability": spec.suitability,
+                 "notes": spec.notes,
+             }
+             for spec in KNOWN_MODELS.values()
+         ]
+
+     def get_model_info(self, model_name: str) -> Optional[Dict[str, Any]]:
+         """Get detailed info about a model"""
+         spec = get_model_spec(model_name)
+         if not spec:
+             return None
+
+         return {
+             "name": spec.name,
+             "ollama_name": spec.ollama_name,
+             "context_window": spec.context_window,
+             "max_output_tokens": spec.max_output_tokens,
+             "prompt_format": spec.prompt_format.value,
+             "capabilities": [c.value for c in spec.capabilities],
+             "suitability": spec.suitability,
+             "memory_gb": spec.memory_gb,
+             "tokens_per_second": spec.tokens_per_second,
+             "notes": spec.notes,
+         }
+
+     def add_custom_model(
+         self,
+         ollama_name: str,
+         name: Optional[str] = None,
+         context_window: int = 32768,
+         prompt_format: str = "chatml",
+         capabilities: Optional[List[str]] = None,
+         suitability: Optional[Dict[str, float]] = None
+     ) -> ModelSpec:
+         """
+         Add a custom model to the registry.
+
+         Args:
+             ollama_name: Name in Ollama
+             name: Human-readable name
+             context_window: Context window size
+             prompt_format: Prompt format name
+             capabilities: List of capability strings
+             suitability: Suitability scores by task
+
+         Returns:
+             Created ModelSpec
+         """
+         # Parse prompt format
+         try:
+             fmt = PromptFormat(prompt_format)
+         except ValueError:
+             fmt = PromptFormat.CHATML
+
+         # Parse capabilities
+         caps = []
+         if capabilities:
+             for cap in capabilities:
+                 try:
+                     caps.append(ModelCapability(cap))
+                 except ValueError:
+                     pass
+
+         spec = ModelSpec(
+             name=name or ollama_name,
+             ollama_name=ollama_name,
+             context_window=context_window,
+             prompt_format=fmt,
+             capabilities=caps,
+             suitability=suitability or {"general": 0.7},
+         )
+
+         register_model(spec)
+         return spec
+
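Note that invalid prompt_format or capability strings are silently coerced or dropped rather than raised. A sketch of registering a local fine-tune (the model name is hypothetical):

    spec = manager.add_custom_model(
        ollama_name="my-finetune:7b",  # hypothetical local model
        name="My Finetune 7B",
        context_window=8192,
        prompt_format="chatml",
        suitability={"coding": 0.8, "general": 0.6},
    )
    manager.set_model_for_task("coding", spec.ollama_name)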
+     def get_task_assignments(self) -> Dict[str, str]:
+         """Get current task-to-model assignments"""
+         return self._model_assignments.copy()
+
+     def _load_assignments(self) -> None:
+         """Load model assignments from disk"""
+         try:
+             path = Path.home() / ".nc1709" / "model_assignments.json"
+             if path.exists():
+                 with open(path) as f:
+                     self._model_assignments = json.load(f)
+         except Exception:
+             self._model_assignments = {}
+
+     def _save_assignments(self) -> None:
+         """Save model assignments to disk"""
+         try:
+             path = Path.home() / ".nc1709" / "model_assignments.json"
+             path.parent.mkdir(parents=True, exist_ok=True)
+             with open(path, "w") as f:
+                 json.dump(self._model_assignments, f, indent=2)
+         except Exception:
+             pass
+
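The persisted file is a flat task-to-model map written with json.dump, so it can be inspected or edited by hand. Given the save logic above, ~/.nc1709/model_assignments.json would look like this (task names and models illustrative):

    {
      "coding": "qwen2.5:32b",
      "fast": "llama3.2:3b"
    }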
+
+ # ============================================================================
+ # CLI HELPER FUNCTIONS
+ # ============================================================================
+
+ def print_model_info(model_name: str) -> None:
+     """Print detailed info about a model"""
+     spec = get_model_spec(model_name)
+
+     if not spec:
+         print(f"Model not found: {model_name}")
+         return
+
+     print(f"\n{spec.name}")
+     print(f"  Ollama: {spec.ollama_name}")
+     print(f"  Context: {spec.context_window:,} tokens")
+     print(f"  Max Output: {spec.max_output_tokens:,} tokens")
+     print(f"  Format: {spec.prompt_format.value}")
+
+     if spec.capabilities:
+         caps = ", ".join(c.value for c in spec.capabilities)
+         print(f"  Capabilities: {caps}")
+
+     if spec.suitability:
+         print("  Suitability:")
+         for task, score in sorted(spec.suitability.items(), key=lambda x: -x[1]):
+             bar = "#" * int(score * 10) + "-" * (10 - int(score * 10))
+             print(f"    {task}: [{bar}] {score:.0%}")
+
+     if spec.memory_gb:
+         print(f"  Memory: {spec.memory_gb:.1f} GB")
+
+     if spec.tokens_per_second:
+         print(f"  Speed: ~{spec.tokens_per_second:.0f} tokens/sec")
+
+     if spec.notes:
+         print(f"  Notes: {spec.notes}")
+
+
+ def print_all_models() -> None:
+     """Print summary of all registered models"""
+     print("\nRegistered Models:")
+     print("-" * 60)
+
+     for name, spec in sorted(KNOWN_MODELS.items()):
+         # Best task
+         if spec.suitability:
+             best_task = max(spec.suitability.items(), key=lambda x: x[1])
+             best_str = f"Best for: {best_task[0]}"
+         else:
+             best_str = ""
+
+         ctx_k = spec.context_window // 1000
+         print(f"  {spec.name}")
+         print(f"    -> {spec.ollama_name} | {ctx_k}K context | {best_str}")
+
+     print("-" * 60)
+     print(f"Total: {len(KNOWN_MODELS)} models")
+
+
+ def print_task_assignments(manager: ModelManager) -> None:
+     """Print current task assignments"""
+     assignments = manager.get_task_assignments()
+
+     print("\nTask Assignments:")
+     print("-" * 40)
+
+     if not assignments:
+         print("  (No custom assignments - using defaults)")
+     else:
+         for task, model in sorted(assignments.items()):
+             print(f"  {task}: {model}")
+
+     print("-" * 40)