nc1709-1.15.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nc1709/__init__.py +13 -0
- nc1709/agent/__init__.py +36 -0
- nc1709/agent/core.py +505 -0
- nc1709/agent/mcp_bridge.py +245 -0
- nc1709/agent/permissions.py +298 -0
- nc1709/agent/tools/__init__.py +21 -0
- nc1709/agent/tools/base.py +440 -0
- nc1709/agent/tools/bash_tool.py +367 -0
- nc1709/agent/tools/file_tools.py +454 -0
- nc1709/agent/tools/notebook_tools.py +516 -0
- nc1709/agent/tools/search_tools.py +322 -0
- nc1709/agent/tools/task_tool.py +284 -0
- nc1709/agent/tools/web_tools.py +555 -0
- nc1709/agents/__init__.py +17 -0
- nc1709/agents/auto_fix.py +506 -0
- nc1709/agents/test_generator.py +507 -0
- nc1709/checkpoints.py +372 -0
- nc1709/cli.py +3380 -0
- nc1709/cli_ui.py +1080 -0
- nc1709/cognitive/__init__.py +149 -0
- nc1709/cognitive/anticipation.py +594 -0
- nc1709/cognitive/context_engine.py +1046 -0
- nc1709/cognitive/council.py +824 -0
- nc1709/cognitive/learning.py +761 -0
- nc1709/cognitive/router.py +583 -0
- nc1709/cognitive/system.py +519 -0
- nc1709/config.py +155 -0
- nc1709/custom_commands.py +300 -0
- nc1709/executor.py +333 -0
- nc1709/file_controller.py +354 -0
- nc1709/git_integration.py +308 -0
- nc1709/github_integration.py +477 -0
- nc1709/image_input.py +446 -0
- nc1709/linting.py +519 -0
- nc1709/llm_adapter.py +667 -0
- nc1709/logger.py +192 -0
- nc1709/mcp/__init__.py +18 -0
- nc1709/mcp/client.py +370 -0
- nc1709/mcp/manager.py +407 -0
- nc1709/mcp/protocol.py +210 -0
- nc1709/mcp/server.py +473 -0
- nc1709/memory/__init__.py +20 -0
- nc1709/memory/embeddings.py +325 -0
- nc1709/memory/indexer.py +474 -0
- nc1709/memory/sessions.py +432 -0
- nc1709/memory/vector_store.py +451 -0
- nc1709/models/__init__.py +86 -0
- nc1709/models/detector.py +377 -0
- nc1709/models/formats.py +315 -0
- nc1709/models/manager.py +438 -0
- nc1709/models/registry.py +497 -0
- nc1709/performance/__init__.py +343 -0
- nc1709/performance/cache.py +705 -0
- nc1709/performance/pipeline.py +611 -0
- nc1709/performance/tiering.py +543 -0
- nc1709/plan_mode.py +362 -0
- nc1709/plugins/__init__.py +17 -0
- nc1709/plugins/agents/__init__.py +18 -0
- nc1709/plugins/agents/django_agent.py +912 -0
- nc1709/plugins/agents/docker_agent.py +623 -0
- nc1709/plugins/agents/fastapi_agent.py +887 -0
- nc1709/plugins/agents/git_agent.py +731 -0
- nc1709/plugins/agents/nextjs_agent.py +867 -0
- nc1709/plugins/base.py +359 -0
- nc1709/plugins/manager.py +411 -0
- nc1709/plugins/registry.py +337 -0
- nc1709/progress.py +443 -0
- nc1709/prompts/__init__.py +22 -0
- nc1709/prompts/agent_system.py +180 -0
- nc1709/prompts/task_prompts.py +340 -0
- nc1709/prompts/unified_prompt.py +133 -0
- nc1709/reasoning_engine.py +541 -0
- nc1709/remote_client.py +266 -0
- nc1709/shell_completions.py +349 -0
- nc1709/slash_commands.py +649 -0
- nc1709/task_classifier.py +408 -0
- nc1709/version_check.py +177 -0
- nc1709/web/__init__.py +8 -0
- nc1709/web/server.py +950 -0
- nc1709/web/templates/index.html +1127 -0
- nc1709-1.15.4.dist-info/METADATA +858 -0
- nc1709-1.15.4.dist-info/RECORD +86 -0
- nc1709-1.15.4.dist-info/WHEEL +5 -0
- nc1709-1.15.4.dist-info/entry_points.txt +2 -0
- nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
- nc1709-1.15.4.dist-info/top_level.txt +1 -0
nc1709/cognitive/system.py
ADDED
@@ -0,0 +1,519 @@
"""
CognitiveSystem - The unified orchestrator for NC1709's cognitive architecture

This class integrates all 5 layers:
- Layer 1: Intelligent Router
- Layer 2: Deep Context Engine
- Layer 3: Multi-Agent Council
- Layer 4: Learning Core
- Layer 5: Anticipation Engine

Plus the Performance Optimization stack:
- Multi-level caching (L1/L2/L3)
- Smart model tiering
- Parallel processing pipeline

The CognitiveSystem is the main entry point for intelligent request processing.
"""

import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Any
from pathlib import Path
from datetime import datetime
import asyncio

logger = logging.getLogger(__name__)


@dataclass
class CognitiveRequest:
    """A request to the cognitive system"""
    prompt: str
    context: Optional[Dict[str, Any]] = None
    target_files: Optional[List[str]] = None
    force_council: bool = False  # Force the council to convene even for simple tasks
    force_no_cache: bool = False  # Skip cache lookup
    stream: bool = False
    user_preferences: Optional[Dict[str, Any]] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class CognitiveResponse:
    """Response from the cognitive system"""
    content: str
    model_used: str
    category: str
    complexity: float
    council_used: bool = False
    council_agents: Optional[List[str]] = None
    suggestions: Optional[List[Dict[str, Any]]] = None
    context_summary: Optional[str] = None
    processing_time_ms: Optional[int] = None
    cache_hit: bool = False
    cache_level: Optional[str] = None  # "L1", "L2", "L3"
    tier_used: Optional[str] = None  # "instant", "fast", "smart", "council"
    metadata: Dict[str, Any] = field(default_factory=dict)


class CognitiveSystem:
    """
    NC1709 Cognitive System

    The brain of NC1709 - orchestrates all 5 cognitive layers to provide
    intelligent, personalized, and proactive assistance.

    Flow:
    1. Request comes in
    2. Layer 1 (Router) analyzes intent and routes to the appropriate model
    3. Layer 2 (Context) provides relevant codebase context
    4. Layer 3 (Council) convenes for complex tasks
    5. Layer 4 (Learning) records the interaction and updates preferences
    6. Layer 5 (Anticipation) generates proactive suggestions

    Example:
        system = CognitiveSystem(llm_adapter=adapter)
        response = system.quick_process("fix the authentication bug in login.py")
        print(response.content)
        print(f"Used model: {response.model_used}")
        print(f"Council used: {response.council_used}")
        for suggestion in response.suggestions or []:
            print(f"Suggestion: {suggestion['title']}")
    """

    def __init__(
        self,
        llm_adapter: Optional[Any] = None,
        project_root: Optional[Path] = None,
        learning_data_dir: Optional[Path] = None,
        council_threshold: float = 0.75,
        enable_anticipation: bool = True,
        enable_learning: bool = True,
        enable_cache: bool = True,
        enable_tiering: bool = True,
    ):
        """
        Initialize the cognitive system

        Args:
            llm_adapter: LLM adapter for model completions
            project_root: Root directory of the project for context
            learning_data_dir: Directory for storing learning data
            council_threshold: Complexity threshold for convening the council
            enable_anticipation: Enable proactive suggestions
            enable_learning: Enable learning from interactions
            enable_cache: Enable multi-level caching
            enable_tiering: Enable smart model tiering
        """
        self._llm_adapter = llm_adapter
        self.project_root = project_root or Path.cwd()
        self.council_threshold = council_threshold
        self.enable_anticipation = enable_anticipation
        self.enable_learning = enable_learning
        self.enable_cache = enable_cache
        self.enable_tiering = enable_tiering

        # Cognitive layers (lazily initialized)
        self._router = None
        self._context_engine = None
        self._council = None
        self._learning_core = None
        self._anticipation_engine = None

        # Performance optimization (lazily initialized)
        self._cache = None
        self._tier_orchestrator = None

        # Learning data directory
        self._learning_data_dir = learning_data_dir or (Path.home() / ".nc1709" / "learning")

        # Stats
        self._request_count = 0
        self._council_convenes = 0
        self._cache_hits = 0
        self._start_time = datetime.now()

    @property
    def router(self):
        """Layer 1: Intelligent Router"""
        if self._router is None:
            from .router import IntelligentRouter, IntentAnalyzer
            analyzer = IntentAnalyzer(llm_adapter=self._llm_adapter)
            self._router = IntelligentRouter(intent_analyzer=analyzer)
        return self._router

    @property
    def context_engine(self):
        """Layer 2: Deep Context Engine"""
        if self._context_engine is None:
            from .context_engine import DeepContextEngine
            self._context_engine = DeepContextEngine(project_root=self.project_root)
        return self._context_engine

    @property
    def council(self):
        """Layer 3: Multi-Agent Council"""
        if self._council is None:
            from .council import MultiAgentCouncil
            self._council = MultiAgentCouncil(
                llm_adapter=self._llm_adapter,
                council_threshold=self.council_threshold
            )
        return self._council

    @property
    def learning_core(self):
        """Layer 4: Learning Core"""
        if self._learning_core is None and self.enable_learning:
            from .learning import LearningCore
            self._learning_core = LearningCore(data_dir=self._learning_data_dir)
        return self._learning_core

    @property
    def anticipation_engine(self):
        """Layer 5: Anticipation Engine"""
        if self._anticipation_engine is None and self.enable_anticipation:
            from .anticipation import AnticipationEngine
            self._anticipation_engine = AnticipationEngine(
                learning_core=self.learning_core,
                context_engine=self.context_engine
            )
        return self._anticipation_engine

    @property
    def cache(self):
        """Performance: Multi-level Cache"""
        if self._cache is None and self.enable_cache:
            from ..performance import LayeredCache
            cache_path = Path.home() / ".nc1709" / "cache.json"
            self._cache = LayeredCache(persist_path=cache_path)
            self._cache.load()
        return self._cache

    @property
    def tier_orchestrator(self):
        """Performance: Model Tier Orchestrator"""
        if self._tier_orchestrator is None and self.enable_tiering:
            from ..performance import TieredModelOrchestrator
            self._tier_orchestrator = TieredModelOrchestrator()
        return self._tier_orchestrator

    def process(self, request: CognitiveRequest) -> CognitiveResponse:
        """
        Process a cognitive request through all layers

        Args:
            request: The cognitive request

        Returns:
            CognitiveResponse with the result
        """
        start_time = datetime.now()
        self._request_count += 1

        # Step 0: Check the cache first (fastest path)
        if self.enable_cache and not request.force_no_cache and self.cache:
            from ..performance import make_context_hash
            context_hash = make_context_hash(request.context or {})
            cache_result = self.cache.get(request.prompt, context_hash)

            if cache_result.hit:
                self._cache_hits += 1
                processing_time = int((datetime.now() - start_time).total_seconds() * 1000)

                logger.info(f"Cache hit ({cache_result.level}) in {processing_time}ms")

                return CognitiveResponse(
                    content=cache_result.response,
                    model_used="cache",
                    category="cached",
                    complexity=0.0,
                    processing_time_ms=processing_time,
                    cache_hit=True,
                    cache_level=cache_result.level,
                    tier_used="cache",
                )

        # Step 1: Route the request (Layer 1)
        routing = self.router.route_sync(
            prompt=request.prompt,
            context=request.context
        )

        # Get the category from the intent if available
        category = routing.intent.primary_category if routing.intent else "unknown"
        category_value = category.value if hasattr(category, 'value') else str(category)

        logger.info(f"Routed to {routing.primary_model} ({category_value})")

        # Step 2: Build context (Layer 2)
        context = None
        if routing.context_budget > 0:
            context = self.context_engine.build_context_for_task(
                task_description=request.prompt,
                target_files=request.target_files,
            )

        # Get the complexity from the intent if available
        complexity = routing.intent.complexity if routing.intent else 0.5

        # Step 3: Determine whether the council is needed (Layer 3)
        use_council = request.force_council or (
            routing.should_use_council and
            complexity > self.council_threshold
        )

        council_response = None
        council_agents = None

        if use_council:
            self._council_convenes += 1
            council_session = self.council.convene(
                task_description=request.prompt,
                task_category=category_value,
                complexity=complexity,
                context={
                    "code": context.get("code") if context else None,
                    "files": request.target_files,
                },
                agents=routing.agents_to_involve,
            )
            council_response = council_session.consensus
            council_agents = [a.value for a in council_session.agents_consulted]

        # Step 4: Select the model tier (Performance)
        tier_used = None
        selected_model = routing.primary_model

        if self.enable_tiering and self.tier_orchestrator and not use_council:
            tier_decision = self.tier_orchestrator.select_tier(
                prompt=request.prompt,
                category=category_value,
                complexity=complexity,
            )
            tier_used = tier_decision.tier.value
            selected_model = tier_decision.model
            logger.info(f"Tier: {tier_used} -> {selected_model}")

        # Step 5: Get the actual completion
        if council_response:
            # Use the council consensus
            content = council_response
            model_used = "council"
            tier_used = "council"
        else:
            # Get a completion from the selected model
            if self._llm_adapter:
                # Build an enhanced prompt with context
                enhanced_prompt = request.prompt
                if context and context.get("summary"):
                    enhanced_prompt = f"Context: {context['summary']}\n\n{request.prompt}"

                content = self._llm_adapter.complete(
                    prompt=enhanced_prompt,
                    model=selected_model,
                )
            else:
                content = f"[Mock response for: {request.prompt[:100]}...]"
            model_used = selected_model

        # Step 6: Record the interaction for learning (Layer 4)
        if self.learning_core:
            from .learning import InteractionType
            self.learning_core.record_interaction(
                interaction_type=InteractionType.COMPLETION,
                task_category=category_value,
                input_text=request.prompt,
                output_text=content,
                model_used=model_used,
                duration_ms=int((datetime.now() - start_time).total_seconds() * 1000),
            )

        # Step 7: Update anticipation and get suggestions (Layer 5)
        suggestions = []
        if self.anticipation_engine:
            # Update context
            self.anticipation_engine.update_context(
                current_file=request.target_files[0] if request.target_files else None,
                recent_files=request.target_files,
                current_task=category_value,
            )

            # Get suggestions
            suggestion_objects = self.anticipation_engine.get_suggestions(limit=3)
            suggestions = [
                {
                    "type": s.suggestion_type.value,
                    "title": s.title,
                    "description": s.description,
                    "confidence": s.confidence,
                    "action": s.action,
                }
                for s in suggestion_objects
            ]

        # Step 8: Cache the response (Performance)
        if self.enable_cache and self.cache and content:
            from ..performance import make_context_hash
            context_hash = make_context_hash(request.context or {})
            self.cache.set(
                prompt=request.prompt,
                context_hash=context_hash,
                response=content,
                model_used=model_used,
            )

        # Build the response
        processing_time = int((datetime.now() - start_time).total_seconds() * 1000)

        return CognitiveResponse(
            content=content,
            model_used=model_used,
            category=category_value,
            complexity=complexity,
            council_used=use_council,
            council_agents=council_agents,
            suggestions=suggestions if suggestions else None,
            context_summary=context.get("summary") if context else None,
            processing_time_ms=processing_time,
            cache_hit=False,
            cache_level=None,
            tier_used=tier_used,
            metadata={
                "routing_reasoning": routing.reasoning,
                "fallback_model": routing.fallback_model,
            }
        )

    async def process_async(self, request: CognitiveRequest) -> CognitiveResponse:
        """Async version of process"""
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, lambda: self.process(request))

    def quick_process(self, prompt: str, **kwargs) -> CognitiveResponse:
        """Quick helper for simple prompts"""
        request = CognitiveRequest(prompt=prompt, **kwargs)
        return self.process(request)

    def index_project(self, incremental: bool = True) -> Dict[str, Any]:
        """Index the project for context awareness"""
        return self.context_engine.index_project(incremental=incremental)

    def get_suggestions(self, limit: int = 5) -> List[Dict[str, Any]]:
        """Get proactive suggestions"""
        if not self.anticipation_engine:
            return []

        suggestions = self.anticipation_engine.get_suggestions(limit=limit)
        return [
            {
                "type": s.suggestion_type.value,
                "title": s.title,
                "description": s.description,
                "confidence": s.confidence,
                "action": s.action,
            }
            for s in suggestions
        ]

    def get_user_insights(self) -> Dict[str, Any]:
        """Get insights about user patterns"""
        if not self.learning_core:
            return {"error": "Learning not enabled"}
        return self.learning_core.get_user_summary()

    def get_system_stats(self) -> Dict[str, Any]:
        """Get cognitive system statistics"""
        uptime = (datetime.now() - self._start_time).total_seconds()

        stats = {
            "uptime_seconds": uptime,
            "total_requests": self._request_count,
            "cache_hits": self._cache_hits,
            "cache_hit_rate": self._cache_hits / self._request_count if self._request_count > 0 else 0,
            "council_convenes": self._council_convenes,
            "council_rate": self._council_convenes / self._request_count if self._request_count > 0 else 0,
            "layers_active": {
                "router": self._router is not None,
                "context_engine": self._context_engine is not None,
                "council": self._council is not None,
                "learning": self._learning_core is not None,
                "anticipation": self._anticipation_engine is not None,
            },
            "performance_active": {
                "cache": self._cache is not None,
                "tiering": self._tier_orchestrator is not None,
            }
        }

        # Add cache stats if available
        if self._cache:
            stats["cache_stats"] = self._cache.get_stats()

        # Add tiering stats if available
        if self._tier_orchestrator:
            stats["tiering_stats"] = self._tier_orchestrator.get_stats()

        # Add context stats if available
        if self._context_engine and hasattr(self._context_engine, '_indexed'):
            if self._context_engine._indexed:
                summary = self._context_engine.get_project_summary()
                stats["project"] = {
                    "files_indexed": summary.get("files_indexed", 0),
                    "total_lines": summary.get("total_lines", 0),
                    "patterns_detected": len(summary.get("patterns", [])),
                }

        return stats

    def shutdown(self) -> None:
        """Clean shutdown of the cognitive system"""
        logger.info("Shutting down cognitive system...")

        # Save the performance cache
        if self._cache:
            self._cache.save()
            logger.info("Performance cache saved")

        # End the learning session
        if self._learning_core:
            self._learning_core.end_session()

        # End the anticipation session
        if self._anticipation_engine:
            self._anticipation_engine.end_session()

        # Save the context cache
        if self._context_engine:
            self._context_engine.save_cache()

        logger.info("Cognitive system shutdown complete")


# Singleton instance
_cognitive_system: Optional[CognitiveSystem] = None


def get_cognitive_system(
    llm_adapter: Optional[Any] = None,
    project_root: Optional[Path] = None,
    **kwargs
) -> CognitiveSystem:
    """Get or create the cognitive system instance"""
    global _cognitive_system
    if _cognitive_system is None:
        _cognitive_system = CognitiveSystem(
            llm_adapter=llm_adapter,
            project_root=project_root,
            **kwargs
        )
    return _cognitive_system


def quick_cognitive(prompt: str, **kwargs) -> CognitiveResponse:
    """Quick helper for cognitive processing"""
    system = get_cognitive_system()
    return system.quick_process(prompt, **kwargs)
nc1709/config.py
ADDED
@@ -0,0 +1,155 @@
"""
Configuration management for the NC1709 CLI
"""
import os
import json
import copy
from pathlib import Path
from typing import Dict, Any, Optional


class Config:
    """Manages NC1709 configuration"""

    DEFAULT_CONFIG = {
        "models": {
            "reasoning": "deepseek-r1:latest",
            "coding": "qwen2.5-coder:32b",
            "tools": "qwen2.5:32b",
            "general": "qwen2.5:32b",
            "fast": "qwen2.5-coder:7b"
        },
        "ollama": {
            "base_url": "http://localhost:11434",
            "timeout": 120
        },
        "safety": {
            "confirm_writes": True,
            "confirm_commands": True,
            "confirm_destructive": True,
            "auto_backup": True,
            "backup_dir": "~/.nc1709/backups"
        },
        "execution": {
            "max_retries": 3,
            "command_timeout": 60,
            "allowed_commands": [
                "ls", "cat", "grep", "find", "git", "npm", "npx", "pip", "pip3",
                "python", "python3", "node", "go", "cargo", "docker", "kubectl",
                "make", "cmake", "rustc", "javac", "java", "mvn", "gradle",
                "pytest", "jest", "yarn", "pnpm", "brew", "apt", "yum",
                "echo", "pwd", "whoami", "date", "sleep", "touch", "mkdir", "cp", "mv"
            ],
            "blocked_commands": [
                "rm -rf /", "rm -rf /*", "rm -rf ~",
                "dd if=/dev/zero", "dd if=/dev/random",
                "mkfs", "fdisk", "format",
                ":(){:|:&};:", "fork bomb"
            ]
        },
        "memory": {
            "enabled": False,  # Will be enabled in Phase 2
            "vector_db_path": "~/.nc1709/memory/vectors",
            "conversation_history": 100
        },
        "ui": {
            "color": True,
            "verbose": False,
            "stream_output": True
        }
    }

    def __init__(self, config_path: Optional[str] = None):
        """Initialize configuration

        Args:
            config_path: Path to the config file. Defaults to ~/.nc1709/config.json
        """
        if config_path is None:
            config_path = os.path.expanduser("~/.nc1709/config.json")

        self.config_path = Path(config_path)
        # Use a deep copy to avoid modifying the class-level DEFAULT_CONFIG
        self.config: Dict[str, Any] = copy.deepcopy(self.DEFAULT_CONFIG)
        self.load()

    def load(self) -> None:
        """Load configuration from file"""
        if self.config_path.exists():
            try:
                with open(self.config_path, 'r') as f:
                    user_config = json.load(f)
                self._merge_config(user_config)
            except Exception as e:
                print(f"Warning: Could not load config from {self.config_path}: {e}")
                print("Using default configuration")

    def save(self) -> None:
        """Save configuration to file"""
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.config_path, 'w') as f:
            json.dump(self.config, f, indent=2)

    def _merge_config(self, user_config: Dict[str, Any]) -> None:
        """Merge user configuration with the defaults"""
        for key, value in user_config.items():
            if key in self.config and isinstance(self.config[key], dict) and isinstance(value, dict):
                self.config[key].update(value)
            else:
                self.config[key] = value

    def get(self, key: str, default: Any = None) -> Any:
        """Get a configuration value using dot notation

        Args:
            key: Configuration key (e.g., 'models.reasoning')
            default: Default value if the key is not found

        Returns:
            Configuration value
        """
        keys = key.split('.')
        value = self.config
        for k in keys:
            if isinstance(value, dict) and k in value:
                value = value[k]
            else:
                return default
        return value

    def set(self, key: str, value: Any) -> None:
        """Set a configuration value using dot notation

        Args:
            key: Configuration key (e.g., 'models.reasoning')
            value: Value to set
        """
        keys = key.split('.')
        config = self.config
        for k in keys[:-1]:
            if k not in config:
                config[k] = {}
            config = config[k]
        config[keys[-1]] = value
        self.save()

    def get_model_for_task(self, task_type: str) -> str:
        """Get the appropriate model for a task type

        Args:
            task_type: Type of task (reasoning, coding, tools, general, fast)

        Returns:
            Model name
        """
        return self.get(f"models.{task_type}", self.get("models.general"))


# Global configuration instance
_config: Optional[Config] = None


def get_config() -> Config:
    """Get the global configuration instance"""
    global _config
    if _config is None:
        _config = Config()
    return _config