nc1709 1.15.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. nc1709/__init__.py +13 -0
  2. nc1709/agent/__init__.py +36 -0
  3. nc1709/agent/core.py +505 -0
  4. nc1709/agent/mcp_bridge.py +245 -0
  5. nc1709/agent/permissions.py +298 -0
  6. nc1709/agent/tools/__init__.py +21 -0
  7. nc1709/agent/tools/base.py +440 -0
  8. nc1709/agent/tools/bash_tool.py +367 -0
  9. nc1709/agent/tools/file_tools.py +454 -0
  10. nc1709/agent/tools/notebook_tools.py +516 -0
  11. nc1709/agent/tools/search_tools.py +322 -0
  12. nc1709/agent/tools/task_tool.py +284 -0
  13. nc1709/agent/tools/web_tools.py +555 -0
  14. nc1709/agents/__init__.py +17 -0
  15. nc1709/agents/auto_fix.py +506 -0
  16. nc1709/agents/test_generator.py +507 -0
  17. nc1709/checkpoints.py +372 -0
  18. nc1709/cli.py +3380 -0
  19. nc1709/cli_ui.py +1080 -0
  20. nc1709/cognitive/__init__.py +149 -0
  21. nc1709/cognitive/anticipation.py +594 -0
  22. nc1709/cognitive/context_engine.py +1046 -0
  23. nc1709/cognitive/council.py +824 -0
  24. nc1709/cognitive/learning.py +761 -0
  25. nc1709/cognitive/router.py +583 -0
  26. nc1709/cognitive/system.py +519 -0
  27. nc1709/config.py +155 -0
  28. nc1709/custom_commands.py +300 -0
  29. nc1709/executor.py +333 -0
  30. nc1709/file_controller.py +354 -0
  31. nc1709/git_integration.py +308 -0
  32. nc1709/github_integration.py +477 -0
  33. nc1709/image_input.py +446 -0
  34. nc1709/linting.py +519 -0
  35. nc1709/llm_adapter.py +667 -0
  36. nc1709/logger.py +192 -0
  37. nc1709/mcp/__init__.py +18 -0
  38. nc1709/mcp/client.py +370 -0
  39. nc1709/mcp/manager.py +407 -0
  40. nc1709/mcp/protocol.py +210 -0
  41. nc1709/mcp/server.py +473 -0
  42. nc1709/memory/__init__.py +20 -0
  43. nc1709/memory/embeddings.py +325 -0
  44. nc1709/memory/indexer.py +474 -0
  45. nc1709/memory/sessions.py +432 -0
  46. nc1709/memory/vector_store.py +451 -0
  47. nc1709/models/__init__.py +86 -0
  48. nc1709/models/detector.py +377 -0
  49. nc1709/models/formats.py +315 -0
  50. nc1709/models/manager.py +438 -0
  51. nc1709/models/registry.py +497 -0
  52. nc1709/performance/__init__.py +343 -0
  53. nc1709/performance/cache.py +705 -0
  54. nc1709/performance/pipeline.py +611 -0
  55. nc1709/performance/tiering.py +543 -0
  56. nc1709/plan_mode.py +362 -0
  57. nc1709/plugins/__init__.py +17 -0
  58. nc1709/plugins/agents/__init__.py +18 -0
  59. nc1709/plugins/agents/django_agent.py +912 -0
  60. nc1709/plugins/agents/docker_agent.py +623 -0
  61. nc1709/plugins/agents/fastapi_agent.py +887 -0
  62. nc1709/plugins/agents/git_agent.py +731 -0
  63. nc1709/plugins/agents/nextjs_agent.py +867 -0
  64. nc1709/plugins/base.py +359 -0
  65. nc1709/plugins/manager.py +411 -0
  66. nc1709/plugins/registry.py +337 -0
  67. nc1709/progress.py +443 -0
  68. nc1709/prompts/__init__.py +22 -0
  69. nc1709/prompts/agent_system.py +180 -0
  70. nc1709/prompts/task_prompts.py +340 -0
  71. nc1709/prompts/unified_prompt.py +133 -0
  72. nc1709/reasoning_engine.py +541 -0
  73. nc1709/remote_client.py +266 -0
  74. nc1709/shell_completions.py +349 -0
  75. nc1709/slash_commands.py +649 -0
  76. nc1709/task_classifier.py +408 -0
  77. nc1709/version_check.py +177 -0
  78. nc1709/web/__init__.py +8 -0
  79. nc1709/web/server.py +950 -0
  80. nc1709/web/templates/index.html +1127 -0
  81. nc1709-1.15.4.dist-info/METADATA +858 -0
  82. nc1709-1.15.4.dist-info/RECORD +86 -0
  83. nc1709-1.15.4.dist-info/WHEEL +5 -0
  84. nc1709-1.15.4.dist-info/entry_points.txt +2 -0
  85. nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
  86. nc1709-1.15.4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,343 @@
1
+ """
2
+ NC1709 Performance Optimization Stack
3
+
4
+ A comprehensive performance optimization layer that provides:
5
+ - Multi-level intelligent caching (L1/L2/L3)
6
+ - Smart model tiering (3B → 7B → 32B → Council)
7
+ - Parallel processing pipeline
8
+
9
+ Expected improvements:
10
+ - Cache hits: <100ms (instant)
11
+ - Simple queries: 0.3-0.5s (was 5s)
12
+ - Medium queries: 1-2s (was 8s)
13
+ - Complex queries: 5-7s (was 15s)
14
+
15
+ Usage:
16
+ from nc1709.performance import PerformanceOptimizer
17
+
18
+ optimizer = PerformanceOptimizer()
19
+
20
+ # Process with full optimization
21
+ result = optimizer.process(
22
+ prompt="explain decorators",
23
+ context={"files": ["main.py"]}
24
+ )
25
+
26
+ print(f"Response in {result.latency_ms}ms (cache: {result.cache_hit})")
27
+ """
28
+
29
+ from .cache import (
30
+ LayeredCache,
31
+ L1ExactCache,
32
+ L2SemanticCache,
33
+ L3TemplateCache,
34
+ CacheEntry,
35
+ CacheStats,
36
+ CacheResult,
37
+ get_cache,
38
+ make_context_hash,
39
+ )
40
+
41
+ from .tiering import (
42
+ ModelTier,
43
+ TierConfig,
44
+ TieringDecision,
45
+ TieringStats,
46
+ TieredModelOrchestrator,
47
+ get_orchestrator,
48
+ quick_tier,
49
+ DEFAULT_TIERS,
50
+ )
51
+
52
+ from .pipeline import (
53
+ PipelineStage,
54
+ StageResult,
55
+ PipelineResult,
56
+ PipelineStats,
57
+ ParallelPipeline,
58
+ SyncPipeline,
59
+ create_pipeline,
60
+ )
61
+
62
+ from typing import Dict, Any, Optional
63
+ from pathlib import Path
64
+ from dataclasses import dataclass
65
+ import logging
66
+ import time
67
+
68
+ logger = logging.getLogger(__name__)
69
+
70
+
71
@dataclass
class OptimizedResult:
    """Outcome of one optimized request.

    Carries the generated (or cached) response text together with the
    performance metadata the optimizer collected while producing it.
    """
    response: Optional[str]          # final answer text; None on failure
    latency_ms: float                # end-to-end wall-clock time, milliseconds
    cache_hit: bool                  # whether the response came from the cache
    cache_level: Optional[str]       # which cache layer hit (e.g. L1/L2), if any
    tier_used: Optional[str]         # model tier that produced the response
    model_used: Optional[str]        # concrete model identifier, if known
    stage_timings: Dict[str, float]  # per-pipeline-stage timings, milliseconds

    @property
    def success(self) -> bool:
        """True when a response was actually produced."""
        if self.response is None:
            return False
        return True
85
+
86
+
87
class PerformanceOptimizer:
    """
    Unified performance optimization interface.

    Combines caching, tiering, and parallel pipeline into a single
    easy-to-use interface.

    Usage:
        optimizer = PerformanceOptimizer(
            project_root=Path.cwd(),
            enable_cache=True,
            enable_tiering=True,
            enable_parallel=True,
        )

        result = optimizer.process("explain decorators")

        if result.cache_hit:
            print(f"Cache hit ({result.cache_level})!")
        else:
            print(f"Generated with {result.model_used}")

        print(f"Total: {result.latency_ms}ms")
    """

    def __init__(
        self,
        llm_adapter=None,
        intent_analyzer=None,
        context_engine=None,
        project_root: Optional[Path] = None,
        cache_path: Optional[Path] = None,
        enable_cache: bool = True,
        enable_tiering: bool = True,
        enable_parallel: bool = True,
        l1_cache_size: int = 1000,
        l2_cache_size: int = 500,
        l2_threshold: float = 0.92,
        conservative_tiering: bool = False,
    ):
        """Wire together the cache, tier orchestrator and pipeline.

        Args:
            llm_adapter: Backend handed to the pipeline for generation.
            intent_analyzer: Optional analyzer passed through to the pipeline.
            context_engine: Optional context provider passed to the pipeline.
            project_root: Project directory; defaults to the current one.
            cache_path: Cache persistence file; defaults to
                ``~/.nc1709/cache.json``.
            enable_cache: Build and consult the layered cache.
            enable_tiering: Build the tiered-model orchestrator.
            enable_parallel: Let the pipeline run stages in parallel.
            l1_cache_size: Max entries in the L1 exact-match cache.
            l2_cache_size: Max entries in the L2 semantic cache.
            l2_threshold: Similarity threshold for L2 hits.
            conservative_tiering: Bias tier selection toward larger models.
        """
        self.llm_adapter = llm_adapter
        self.project_root = project_root or Path.cwd()
        self.enable_cache = enable_cache
        self.enable_tiering = enable_tiering
        self.enable_parallel = enable_parallel

        # Layered cache (L1/L2/L3); loaded from disk so hits survive restarts.
        self._cache = None
        if enable_cache:
            cache_path = cache_path or (Path.home() / ".nc1709" / "cache.json")
            self._cache = LayeredCache(
                l1_size=l1_cache_size,
                l2_size=l2_cache_size,
                l2_threshold=l2_threshold,
                persist_path=cache_path,
            )
            self._cache.load()

        # Model tiering (routes simple prompts to small models).
        self._tier_orchestrator = None
        if enable_tiering:
            self._tier_orchestrator = TieredModelOrchestrator(
                conservative=conservative_tiering
            )

        # Processing pipeline; receives the cache/orchestrator built above
        # (either may be None when the corresponding feature is disabled).
        self._pipeline = ParallelPipeline(
            cache=self._cache,
            intent_analyzer=intent_analyzer,
            context_engine=context_engine,
            tier_orchestrator=self._tier_orchestrator,
            llm_adapter=llm_adapter,
            enable_parallel=enable_parallel,
        )

        # Lightweight request accounting for get_stats().
        self._request_count = 0
        self._start_time = time.time()

    def process(
        self,
        prompt: str,
        context: Optional[Dict[str, Any]] = None,
        force_no_cache: bool = False,
        force_tier: Optional[ModelTier] = None,
    ) -> OptimizedResult:
        """
        Process a request with full optimization.

        Args:
            prompt: User's prompt
            context: Additional context
            force_no_cache: Skip cache lookup
            force_tier: Force a specific tier
                (NOTE(review): currently accepted but not forwarded to the
                pipeline — confirm intended wiring before relying on it)

        Returns:
            OptimizedResult with response and performance info
        """
        start = time.time()
        self._request_count += 1
        context = context or {}

        # Fast path: answer straight from the cache before touching the
        # (much slower) pipeline.
        if self.enable_cache and not force_no_cache and self._cache:
            cached = self._try_cache(prompt, context, start)
            if cached is not None:
                return cached

        # Full pipeline run; the pipeline's own cache stage is skipped
        # because we already checked (or deliberately bypassed) it above.
        result = self._run_pipeline(prompt, context)

        latency = (time.time() - start) * 1000

        # Store the fresh response so identical requests become cache hits.
        if self.enable_cache and self._cache and result.response:
            self._cache.set(
                prompt=prompt,
                context_hash=make_context_hash(context),
                response=result.response,
                model_used=result.tier_used or "unknown",
            )

        return OptimizedResult(
            response=result.response,
            latency_ms=latency,
            cache_hit=result.cache_hit,
            cache_level=None,
            tier_used=result.tier_used,
            model_used=result.tier_used,
            stage_timings=result.get_stage_timing(),
        )

    def _try_cache(
        self,
        prompt: str,
        context: Dict[str, Any],
        start: float,
    ) -> Optional[OptimizedResult]:
        """Return a cached OptimizedResult, or None on a cache miss."""
        context_hash = make_context_hash(context)
        cache_result = self._cache.get(prompt, context_hash)
        if not cache_result.hit:
            return None

        latency = (time.time() - start) * 1000
        logger.info(f"Cache hit ({cache_result.level}) in {latency:.1f}ms")
        return OptimizedResult(
            response=cache_result.response,
            latency_ms=latency,
            cache_hit=True,
            cache_level=cache_result.level,
            tier_used="cache",
            model_used="cache",
            stage_timings={"cache_lookup": cache_result.time_ms},
        )

    def _run_pipeline(self, prompt: str, context: Dict[str, Any]):
        """Execute the async pipeline from sync code.

        Uses asyncio.get_running_loop() instead of the deprecated
        get_event_loop(): when no loop is running in this thread we own a
        fresh one via asyncio.run(); when one IS running (we were called
        from async code) we must not nest loops, so we fall back to the
        synchronous pipeline wrapper.
        """
        import asyncio
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No running loop in this thread — safe to create one.
            return asyncio.run(
                self._pipeline.process(prompt, context, force_no_cache=True)
            )
        # A loop is already running: use the sync wrapper to avoid
        # "event loop is already running" errors.
        return SyncPipeline(self._pipeline).process(
            prompt, context, force_no_cache=True
        )

    def get_stats(self) -> Dict[str, Any]:
        """Get comprehensive performance statistics"""
        uptime = time.time() - self._start_time

        stats = {
            "uptime_seconds": round(uptime, 2),
            "total_requests": self._request_count,
            "requests_per_minute": round(self._request_count / (uptime / 60), 2) if uptime > 0 else 0,
        }

        # Sub-component stats only when the component was enabled.
        if self._cache:
            stats["cache"] = self._cache.get_stats()

        if self._tier_orchestrator:
            stats["tiering"] = self._tier_orchestrator.get_stats()

        stats["pipeline"] = self._pipeline.get_stats()

        return stats

    def clear_cache(self) -> Dict[str, int]:
        """Clear all caches; returns per-layer eviction counts (empty if no cache)."""
        if self._cache:
            return self._cache.clear()
        return {}

    def save_cache(self) -> bool:
        """Persist cache to disk; False when caching is disabled."""
        if self._cache:
            return self._cache.save()
        return False

    def shutdown(self) -> None:
        """Clean shutdown: flush the cache, then stop the pipeline."""
        self.save_cache()
        self._pipeline.shutdown()
        logger.info("Performance optimizer shutdown complete")
284
+
285
+
286
# Module-wide singleton, created lazily by get_optimizer().
_optimizer: Optional[PerformanceOptimizer] = None


def get_optimizer(**kwargs) -> PerformanceOptimizer:
    """Return the shared PerformanceOptimizer, building it on first call.

    Keyword arguments are forwarded to the constructor only when the
    instance does not exist yet; subsequent calls return the existing
    instance unchanged.
    """
    global _optimizer
    if _optimizer is not None:
        return _optimizer
    _optimizer = PerformanceOptimizer(**kwargs)
    return _optimizer
296
+
297
+
298
def quick_optimize(
    prompt: str,
    context: Optional[Dict[str, Any]] = None,
    **kwargs
) -> OptimizedResult:
    """Convenience wrapper: run *prompt* through the shared optimizer.

    Extra keyword arguments configure the optimizer on first use (see
    ``get_optimizer``).
    """
    return get_optimizer(**kwargs).process(prompt, context)
306
+
307
+
308
# Explicit public API of nc1709.performance: `from nc1709.performance
# import *` exposes exactly these names, grouped by submodule.
__all__ = [
    # Cache
    "LayeredCache",
    "L1ExactCache",
    "L2SemanticCache",
    "L3TemplateCache",
    "CacheEntry",
    "CacheStats",
    "CacheResult",
    "get_cache",
    "make_context_hash",
    # Tiering
    "ModelTier",
    "TierConfig",
    "TieringDecision",
    "TieringStats",
    "TieredModelOrchestrator",
    "get_orchestrator",
    "quick_tier",
    "DEFAULT_TIERS",
    # Pipeline
    "PipelineStage",
    "StageResult",
    "PipelineResult",
    "PipelineStats",
    "ParallelPipeline",
    "SyncPipeline",
    "create_pipeline",
    # Unified
    "OptimizedResult",
    "PerformanceOptimizer",
    "get_optimizer",
    "quick_optimize",
]

# Version of the performance subpackage (independent of the nc1709 release).
__version__ = "0.1.0"