nc1709 1.15.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nc1709/__init__.py +13 -0
- nc1709/agent/__init__.py +36 -0
- nc1709/agent/core.py +505 -0
- nc1709/agent/mcp_bridge.py +245 -0
- nc1709/agent/permissions.py +298 -0
- nc1709/agent/tools/__init__.py +21 -0
- nc1709/agent/tools/base.py +440 -0
- nc1709/agent/tools/bash_tool.py +367 -0
- nc1709/agent/tools/file_tools.py +454 -0
- nc1709/agent/tools/notebook_tools.py +516 -0
- nc1709/agent/tools/search_tools.py +322 -0
- nc1709/agent/tools/task_tool.py +284 -0
- nc1709/agent/tools/web_tools.py +555 -0
- nc1709/agents/__init__.py +17 -0
- nc1709/agents/auto_fix.py +506 -0
- nc1709/agents/test_generator.py +507 -0
- nc1709/checkpoints.py +372 -0
- nc1709/cli.py +3380 -0
- nc1709/cli_ui.py +1080 -0
- nc1709/cognitive/__init__.py +149 -0
- nc1709/cognitive/anticipation.py +594 -0
- nc1709/cognitive/context_engine.py +1046 -0
- nc1709/cognitive/council.py +824 -0
- nc1709/cognitive/learning.py +761 -0
- nc1709/cognitive/router.py +583 -0
- nc1709/cognitive/system.py +519 -0
- nc1709/config.py +155 -0
- nc1709/custom_commands.py +300 -0
- nc1709/executor.py +333 -0
- nc1709/file_controller.py +354 -0
- nc1709/git_integration.py +308 -0
- nc1709/github_integration.py +477 -0
- nc1709/image_input.py +446 -0
- nc1709/linting.py +519 -0
- nc1709/llm_adapter.py +667 -0
- nc1709/logger.py +192 -0
- nc1709/mcp/__init__.py +18 -0
- nc1709/mcp/client.py +370 -0
- nc1709/mcp/manager.py +407 -0
- nc1709/mcp/protocol.py +210 -0
- nc1709/mcp/server.py +473 -0
- nc1709/memory/__init__.py +20 -0
- nc1709/memory/embeddings.py +325 -0
- nc1709/memory/indexer.py +474 -0
- nc1709/memory/sessions.py +432 -0
- nc1709/memory/vector_store.py +451 -0
- nc1709/models/__init__.py +86 -0
- nc1709/models/detector.py +377 -0
- nc1709/models/formats.py +315 -0
- nc1709/models/manager.py +438 -0
- nc1709/models/registry.py +497 -0
- nc1709/performance/__init__.py +343 -0
- nc1709/performance/cache.py +705 -0
- nc1709/performance/pipeline.py +611 -0
- nc1709/performance/tiering.py +543 -0
- nc1709/plan_mode.py +362 -0
- nc1709/plugins/__init__.py +17 -0
- nc1709/plugins/agents/__init__.py +18 -0
- nc1709/plugins/agents/django_agent.py +912 -0
- nc1709/plugins/agents/docker_agent.py +623 -0
- nc1709/plugins/agents/fastapi_agent.py +887 -0
- nc1709/plugins/agents/git_agent.py +731 -0
- nc1709/plugins/agents/nextjs_agent.py +867 -0
- nc1709/plugins/base.py +359 -0
- nc1709/plugins/manager.py +411 -0
- nc1709/plugins/registry.py +337 -0
- nc1709/progress.py +443 -0
- nc1709/prompts/__init__.py +22 -0
- nc1709/prompts/agent_system.py +180 -0
- nc1709/prompts/task_prompts.py +340 -0
- nc1709/prompts/unified_prompt.py +133 -0
- nc1709/reasoning_engine.py +541 -0
- nc1709/remote_client.py +266 -0
- nc1709/shell_completions.py +349 -0
- nc1709/slash_commands.py +649 -0
- nc1709/task_classifier.py +408 -0
- nc1709/version_check.py +177 -0
- nc1709/web/__init__.py +8 -0
- nc1709/web/server.py +950 -0
- nc1709/web/templates/index.html +1127 -0
- nc1709-1.15.4.dist-info/METADATA +858 -0
- nc1709-1.15.4.dist-info/RECORD +86 -0
- nc1709-1.15.4.dist-info/WHEEL +5 -0
- nc1709-1.15.4.dist-info/entry_points.txt +2 -0
- nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
- nc1709-1.15.4.dist-info/top_level.txt +1 -0
nc1709/cli.py
ADDED
|
@@ -0,0 +1,3380 @@
|
|
|
1
|
+
"""
|
|
2
|
+
NC1709 CLI - Main Command Line Interface
|
|
3
|
+
Provides both direct command mode and interactive shell mode with optional agentic execution
|
|
4
|
+
"""
|
|
5
|
+
import sys
|
|
6
|
+
import os
|
|
7
|
+
import argparse
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Optional
|
|
10
|
+
|
|
11
|
+
from .config import get_config
|
|
12
|
+
from .llm_adapter import LLMAdapter, TaskType, TaskClassifier
|
|
13
|
+
from .file_controller import FileController
|
|
14
|
+
from .executor import CommandExecutor
|
|
15
|
+
from .reasoning_engine import ReasoningEngine
|
|
16
|
+
from .remote_client import RemoteClient, is_remote_mode
|
|
17
|
+
from .cli_ui import (
|
|
18
|
+
ActionSpinner, Color, Icons,
|
|
19
|
+
status, thinking, success, error, warning, info,
|
|
20
|
+
action_spinner, print_response, format_response
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
# Default server URL - users connect to this server by default
|
|
24
|
+
DEFAULT_API_URL = "https://nc1709.lafzusa.com"
|
|
25
|
+
|
|
26
|
+
# Import agent module
|
|
27
|
+
try:
|
|
28
|
+
from .agent import Agent, AgentConfig, PermissionManager, PermissionPolicy, integrate_mcp_with_agent
|
|
29
|
+
HAS_AGENT = True
|
|
30
|
+
except ImportError:
|
|
31
|
+
HAS_AGENT = False
|
|
32
|
+
|
|
33
|
+
# Import checkpoints module
|
|
34
|
+
try:
|
|
35
|
+
from .checkpoints import get_checkpoint_manager
|
|
36
|
+
HAS_CHECKPOINTS = True
|
|
37
|
+
except ImportError:
|
|
38
|
+
HAS_CHECKPOINTS = False
|
|
39
|
+
|
|
40
|
+
# Import git integration
|
|
41
|
+
try:
|
|
42
|
+
from .git_integration import get_git_integration, GitIntegration
|
|
43
|
+
HAS_GIT_INTEGRATION = True
|
|
44
|
+
except ImportError:
|
|
45
|
+
HAS_GIT_INTEGRATION = False
|
|
46
|
+
|
|
47
|
+
# Import custom commands
|
|
48
|
+
try:
|
|
49
|
+
from .custom_commands import get_custom_command_manager, execute_custom_command
|
|
50
|
+
HAS_CUSTOM_COMMANDS = True
|
|
51
|
+
except ImportError:
|
|
52
|
+
HAS_CUSTOM_COMMANDS = False
|
|
53
|
+
|
|
54
|
+
# Import image input
|
|
55
|
+
try:
|
|
56
|
+
from .image_input import (
|
|
57
|
+
get_image_handler, load_image, capture_screenshot,
|
|
58
|
+
get_clipboard_image, get_image_info, is_image_file
|
|
59
|
+
)
|
|
60
|
+
HAS_IMAGE_INPUT = True
|
|
61
|
+
except ImportError:
|
|
62
|
+
HAS_IMAGE_INPUT = False
|
|
63
|
+
|
|
64
|
+
# Import plan mode
|
|
65
|
+
try:
|
|
66
|
+
from .plan_mode import get_plan_manager, PLAN_MODE_SYSTEM_PROMPT
|
|
67
|
+
HAS_PLAN_MODE = True
|
|
68
|
+
except ImportError:
|
|
69
|
+
HAS_PLAN_MODE = False
|
|
70
|
+
|
|
71
|
+
# Import GitHub integration
|
|
72
|
+
try:
|
|
73
|
+
from .github_integration import (
|
|
74
|
+
get_github_integration, format_pr_summary, format_issue_summary
|
|
75
|
+
)
|
|
76
|
+
HAS_GITHUB = True
|
|
77
|
+
except ImportError:
|
|
78
|
+
HAS_GITHUB = False
|
|
79
|
+
|
|
80
|
+
# Import linting integration
|
|
81
|
+
try:
|
|
82
|
+
from .linting import (
|
|
83
|
+
get_linting_manager, format_lint_result, generate_fix_prompt
|
|
84
|
+
)
|
|
85
|
+
HAS_LINTING = True
|
|
86
|
+
except ImportError:
|
|
87
|
+
HAS_LINTING = False
|
|
88
|
+
|
|
89
|
+
# Import cognitive architecture
|
|
90
|
+
try:
|
|
91
|
+
from .cognitive import (
|
|
92
|
+
CognitiveSystem, CognitiveRequest, CognitiveResponse,
|
|
93
|
+
get_cognitive_system
|
|
94
|
+
)
|
|
95
|
+
HAS_COGNITIVE = True
|
|
96
|
+
except ImportError:
|
|
97
|
+
HAS_COGNITIVE = False
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
class NC1709CLI:
|
|
101
|
+
"""Main CLI application"""
|
|
102
|
+
|
|
103
|
+
def __init__(self, remote_url: Optional[str] = None, api_key: Optional[str] = None):
    """Initialize the CLI

    Resolves the connection target (explicit argument, then the
    NC1709_API_URL environment variable, then the public default
    server) and wires up either remote or local components.

    Args:
        remote_url: URL of remote NC1709 server (uses local if None)
        api_key: API key for remote server authentication
    """
    self.config = get_config()
    self.running = True  # interactive-shell loop flag

    # Check for remote mode - use default server if no local override.
    # NOTE(review): because DEFAULT_API_URL is always truthy, the local
    # branch below is unreachable via this constructor alone — local mode
    # presumably requires an explicit override elsewhere (e.g. --local).
    self.remote_url = remote_url or os.environ.get("NC1709_API_URL") or DEFAULT_API_URL
    self.api_key = api_key or os.environ.get("NC1709_API_KEY")
    self.remote_client: Optional[RemoteClient] = None

    if self.remote_url:
        # Remote mode - connect to remote server
        self._init_remote_mode()
    else:
        # Local mode - use local LLMs
        self._init_local_mode()

    # Memory module (lazy loaded via the session_manager / project_indexer
    # properties)
    self._session_manager = None
    self._project_indexer = None
    self._memory_enabled = self.config.get("memory.enabled", False)

    # Plugin system (lazy loaded)
    self._plugin_manager = None

    # MCP support (lazy loaded)
    self._mcp_manager = None

    # Agent mode (lazy loaded) - ON by default for full tool execution
    self._agent = None
    self._agent_mode = self.config.get("agent.enabled", True)  # Enabled by default

    # Cognitive system (lazy loaded)
    self._cognitive_system = None
    self._cognitive_enabled = self.config.get("cognitive.enabled", True)  # Enabled by default
|
|
143
|
+
|
|
144
|
+
def _init_remote_mode(self):
    """Initialize remote mode.

    Connects to the configured remote server, verifies the connection,
    and sets up the minimal local components (no local LLM). On an
    authentication failure it shows the API-key onboarding screen and
    exits; on any other connection failure it prints an error with the
    support contact and exits.
    """
    try:
        self.remote_client = RemoteClient(
            server_url=self.remote_url,
            api_key=self.api_key
        )
        # Verify connection up front so failures surface immediately.
        # Named server_status so it does not shadow the `status` helper
        # imported from cli_ui.
        server_status = self.remote_client.check_status()
        print(f"🌐 Connected to remote NC1709 server: {self.remote_url}")
        print(f"   Server version: {server_status.get('version', 'unknown')}")

        # Set up minimal local components (no LLM needed)
        self.file_controller = FileController()
        self.executor = CommandExecutor()
        self.llm = None
        self.reasoning_engine = None

        # Generate unique user ID for this machine (persistent across sessions)
        self._user_id = self._get_or_create_user_id()

        # Track files to index (batched for efficiency)
        self._files_to_index = []
        self._indexed_files = set()  # Don't re-index same files

    except Exception as e:
        error_msg = str(e).lower()

        # Check if it's an authentication error
        if "401" in error_msg or "unauthorized" in error_msg or "authentication" in error_msg:
            # Show clean startup screen with API key instructions
            self._print_auth_screen()
            sys.exit(1)
        else:
            # Other connection errors
            print(f"❌ Failed to connect to NC1709 server: {e}")
            print("\n📧 For support, contact: support@lafzusa.com")
            sys.exit(1)
|
|
183
|
+
|
|
184
|
+
def _get_or_create_user_id(self) -> str:
    """Get or create a persistent user ID for this machine.

    The ID is the first 16 hex chars of a SHA-256 digest over
    hostname/architecture/login, cached in ~/.nc1709_user_id so every
    session reuses the same value.

    Returns:
        Stable per-machine user ID string.
    """
    import hashlib

    # Use machine-specific info to create a stable ID
    user_id_file = Path.home() / ".nc1709_user_id"

    if user_id_file.exists():
        return user_id_file.read_text().strip()

    # Create new ID based on machine info
    import platform
    # os.getlogin() raises OSError when there is no controlling terminal
    # (cron, CI, containers), so guard it rather than only checking for
    # the attribute's existence.
    try:
        login = os.getlogin() if hasattr(os, 'getlogin') else 'user'
    except OSError:
        login = 'user'
    machine_info = f"{platform.node()}-{platform.machine()}-{login}"
    user_id = hashlib.sha256(machine_info.encode()).hexdigest()[:16]

    # Save for future sessions (best-effort; a read-only home dir is not fatal)
    try:
        user_id_file.write_text(user_id)
    except Exception:
        pass  # If we can't save, that's ok

    return user_id
|
|
207
|
+
|
|
208
|
+
def _init_local_mode(self):
    """Initialize local mode.

    Builds the full local stack: a real LLM adapter plus the file,
    command-execution, and reasoning helpers.
    """
    self.llm = LLMAdapter()
    self.file_controller = FileController()
    self.executor = CommandExecutor()
    self.reasoning_engine = ReasoningEngine()
|
|
214
|
+
|
|
215
|
+
@property
def session_manager(self):
    """Session manager, built on first access; None when memory support is absent."""
    if self._session_manager is not None:
        return self._session_manager
    try:
        from .memory.sessions import SessionManager
        self._session_manager = SessionManager()
    except ImportError:
        # Memory support is optional — silently stay at None.
        pass
    return self._session_manager
|
|
225
|
+
|
|
226
|
+
@property
def project_indexer(self):
    """Project indexer for the cwd; None unless memory is enabled and importable."""
    if self._project_indexer is not None or not self._memory_enabled:
        return self._project_indexer
    try:
        from .memory.indexer import ProjectIndexer
        self._project_indexer = ProjectIndexer(str(Path.cwd()))
    except ImportError:
        # Indexing support is optional — silently stay at None.
        pass
    return self._project_indexer
|
|
236
|
+
|
|
237
|
+
@property
def plugin_manager(self):
    """Plugin manager, built lazily; None when the plugin package is unavailable."""
    if self._plugin_manager is not None:
        return self._plugin_manager
    try:
        from .plugins import PluginManager
        self._plugin_manager = PluginManager()
        # Discover and activate every built-in plugin right away.
        self._plugin_manager.discover_plugins()
        self._plugin_manager.load_all()
    except ImportError:
        # Plugins are optional — silently stay at None.
        pass
    return self._plugin_manager
|
|
250
|
+
|
|
251
|
+
@property
def mcp_manager(self):
    """MCP manager, built on first access; None when MCP support is unavailable."""
    if self._mcp_manager is not None:
        return self._mcp_manager
    try:
        from .mcp import MCPManager
        self._mcp_manager = MCPManager(name="nc1709", version="1.0.0")
        self._mcp_manager.setup_default_tools()
    except ImportError:
        # MCP support is optional — silently stay at None.
        pass
    return self._mcp_manager
|
|
262
|
+
|
|
263
|
+
@property
def cognitive_system(self):
    """Cognitive system — the brain of NC1709 — created lazily when enabled.

    Returns None when already-disabled, unavailable, or construction fails.
    """
    # Guard order mirrors the original short-circuit: an existing system
    # is returned first, then the enabled flag, then availability.
    if self._cognitive_system is not None or not self._cognitive_enabled or not HAS_COGNITIVE:
        return self._cognitive_system
    try:
        self._cognitive_system = get_cognitive_system(
            llm_adapter=self.llm if hasattr(self, 'llm') else None,
            project_root=Path.cwd(),
            enable_anticipation=True,
            enable_learning=True,
        )
        # Project indexing for context awareness is intentionally deferred.
    except Exception:
        # Cognitive support is strictly optional — fail silently.
        pass
    return self._cognitive_system
|
|
279
|
+
|
|
280
|
+
@property
def agent(self):
    """Agent executor, created on first use.

    Returns None when agent support is missing, no local LLM exists, or
    construction fails.
    """
    # Guard order mirrors the original short-circuit: reuse an existing
    # agent before touching HAS_AGENT or the LLM.
    if self._agent is not None or not HAS_AGENT or not self.llm:
        return self._agent
    try:
        # Build the agent from configured limits and verbosity.
        agent_config = AgentConfig(
            max_iterations=self.config.get("agent.max_iterations", 50),
            verbose=self.config.get("ui.verbose", False),
        )
        self._agent = Agent(llm=self.llm, config=agent_config)

        # Expose MCP tools through the agent when a manager exists;
        # integration failure is non-fatal.
        if self.mcp_manager:
            try:
                integrate_mcp_with_agent(self._agent, self.mcp_manager)
            except Exception as exc:
                warning(f"Failed to integrate MCP with agent: {exc}")
    except Exception as exc:
        error(f"Failed to create agent: {exc}")
        self._agent = None
    return self._agent
|
|
305
|
+
|
|
306
|
+
def run(self, args: Optional[list] = None) -> int:
    """Run the CLI.

    Parses the command line and dispatches in priority order:
    informational flags (--version/--config) first, then session,
    indexing, plugin, MCP, dashboard, completion and agent
    subcommands, and finally either the interactive shell or a
    one-shot prompt. The order of these checks is significant.

    Args:
        args: Command line arguments (default: sys.argv)

    Returns:
        Exit code
    """
    parser = self._create_parser()
    parsed_args = parser.parse_args(args)

    # Handle different modes
    if parsed_args.version:
        self._print_version()
        return 0

    if parsed_args.config:
        self._show_config()
        return 0

    # Session management
    if parsed_args.sessions:
        return self._list_sessions()

    if parsed_args.resume:
        return self._run_shell(resume_session=parsed_args.resume)

    # Project indexing
    if parsed_args.index:
        return self._index_project()

    if parsed_args.search:
        return self._search_code(parsed_args.search)

    # Plugin commands
    if parsed_args.plugins:
        return self._list_plugins()

    if parsed_args.plugin:
        return self._run_plugin_action(parsed_args.plugin)

    # MCP commands
    if parsed_args.mcp_status:
        return self._mcp_show_status()

    if parsed_args.mcp_serve:
        return self._mcp_run_server()

    if parsed_args.mcp_connect:
        return self._mcp_connect_servers(parsed_args.mcp_connect)

    if parsed_args.mcp_tool:
        # --args is optional; default to an empty JSON object
        args_json = parsed_args.args if parsed_args.args else "{}"
        return self._mcp_call_tool(parsed_args.mcp_tool, args_json)

    # Web dashboard
    if parsed_args.web:
        serve_remote = getattr(parsed_args, 'serve', False)
        return self._run_web_dashboard(parsed_args.port, serve_remote=serve_remote)

    # Shell completions
    if parsed_args.completion:
        return self._generate_completion(parsed_args.completion)

    # AI Agents
    if parsed_args.fix:
        auto_apply = getattr(parsed_args, 'apply', False)
        return self._run_auto_fix(parsed_args.fix, auto_apply=auto_apply)

    if parsed_args.generate_tests:
        output_file = getattr(parsed_args, 'output', None)
        return self._run_test_generator(parsed_args.generate_tests, output_file=output_file)

    # Agent mode: the flag only toggles state; execution happens below
    if parsed_args.agent:
        self._agent_mode = True

    if parsed_args.shell or not parsed_args.prompt:
        # Interactive shell mode
        return self._run_shell()
    else:
        # Direct command mode
        return self._run_command(parsed_args.prompt)
|
|
390
|
+
|
|
391
|
+
def _create_parser(self) -> argparse.ArgumentParser:
    """Create the argument parser for the nc1709 command.

    Declares the positional prompt plus every mode flag consumed by
    run(); dispatch order lives in run(), not here.

    Returns:
        Configured argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(
        prog="nc1709",
        description="NC1709 - A Local-First AI Developer Assistant",
        epilog="Examples:\n"
               "  nc1709 'create a Python script to parse JSON'\n"
               "  nc1709 --shell\n"
               "  nc1709 --config",
        formatter_class=argparse.RawDescriptionHelpFormatter
    )

    # Positional free-form request (optional; absence triggers shell mode)
    parser.add_argument(
        "prompt",
        nargs="?",
        help="Your request or question"
    )

    parser.add_argument(
        "-s", "--shell",
        action="store_true",
        help="Start interactive shell mode"
    )

    # NOTE: -v is version here, not verbose; --verbose is a separate flag
    parser.add_argument(
        "-v", "--version",
        action="store_true",
        help="Show version information"
    )

    parser.add_argument(
        "-c", "--config",
        action="store_true",
        help="Show current configuration"
    )

    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Enable verbose output"
    )

    # Session management arguments
    parser.add_argument(
        "--sessions",
        action="store_true",
        help="List saved sessions"
    )

    parser.add_argument(
        "--resume",
        metavar="SESSION_ID",
        help="Resume a previous session"
    )

    # Memory/indexing arguments
    parser.add_argument(
        "--index",
        action="store_true",
        help="Index the current project for semantic search"
    )

    parser.add_argument(
        "--search",
        metavar="QUERY",
        help="Search indexed code semantically"
    )

    # Plugin arguments
    parser.add_argument(
        "--plugins",
        action="store_true",
        help="List available plugins"
    )

    parser.add_argument(
        "--plugin",
        metavar="NAME",
        help="Execute a plugin action (e.g., --plugin git:status)"
    )

    # MCP arguments
    parser.add_argument(
        "--mcp-status",
        action="store_true",
        help="Show MCP server status and available tools"
    )

    parser.add_argument(
        "--mcp-serve",
        action="store_true",
        help="Run NC1709 as an MCP server (stdio transport)"
    )

    parser.add_argument(
        "--mcp-connect",
        metavar="CONFIG",
        help="Connect to MCP servers from config file"
    )

    parser.add_argument(
        "--mcp-tool",
        metavar="TOOL",
        help="Call an MCP tool (e.g., --mcp-tool read_file --args '{\"path\": \"file.txt\"}')"
    )

    # JSON payload companion for --mcp-tool
    parser.add_argument(
        "--args",
        metavar="JSON",
        help="JSON arguments for --mcp-tool"
    )

    # Web Dashboard arguments
    parser.add_argument(
        "--web",
        action="store_true",
        help="Start the web dashboard (default: http://localhost:8709)"
    )

    parser.add_argument(
        "--port",
        type=int,
        default=8709,
        help="Port for web dashboard (default: 8709)"
    )

    # Remote mode arguments
    parser.add_argument(
        "--remote",
        metavar="URL",
        help="Connect to remote NC1709 server (e.g., --remote https://your-server.ngrok.io)"
    )

    parser.add_argument(
        "--api-key",
        metavar="KEY",
        help="API key for remote server authentication"
    )

    parser.add_argument(
        "--serve",
        action="store_true",
        help="Run as a server for remote clients (use with --web)"
    )

    parser.add_argument(
        "--local",
        action="store_true",
        help="Force local mode (use local Ollama instead of remote server)"
    )

    # AI Agents arguments
    parser.add_argument(
        "--fix",
        metavar="FILE",
        help="Auto-fix code errors in a file"
    )

    parser.add_argument(
        "--apply",
        action="store_true",
        help="Auto-apply fixes (use with --fix)"
    )

    parser.add_argument(
        "--generate-tests",
        metavar="FILE",
        help="Generate unit tests for a file"
    )

    parser.add_argument(
        "--output",
        metavar="FILE",
        help="Output file for generated tests (use with --generate-tests)"
    )

    # Shell completions
    parser.add_argument(
        "--completion",
        choices=["bash", "zsh", "fish"],
        help="Generate shell completion script"
    )

    # Agentic mode
    parser.add_argument(
        "--agent", "-a",
        action="store_true",
        help="Enable agentic mode with tool execution (Claude Code-style)"
    )

    parser.add_argument(
        "--permission",
        choices=["strict", "normal", "permissive", "trust"],
        default="normal",
        help="Permission policy for agent tools (default: normal)"
    )

    return parser
|
|
593
|
+
|
|
594
|
+
def _print_version(self) -> None:
    """Print version information.

    In remote mode also fetches and prints server details and its model
    map; in local mode prints the locally configured model per task type.
    """
    from . import __version__
    print(f"NC1709 CLI v{__version__}")
    print("A Local-First AI Developer Assistant")

    if self.remote_client:
        print(f"\n🌐 Remote Mode: {self.remote_url}")
        try:
            status = self.remote_client.check_status()
            print(f"   Server: {status.get('server', 'nc1709')}")
            print(f"   Version: {status.get('version', 'unknown')}")
            models = status.get('models', {})
            if models:
                print("\n   Available Models:")
                for task, model in models.items():
                    print(f"     {task:12} → {model}")
        except Exception as e:
            # Version info is still useful when the server is unreachable
            print(f"   (Unable to fetch server info: {e})")
    elif self.llm:
        print("\nConfigured Models:")
        for task_type in TaskType:
            model_info = self.llm.get_model_info(task_type)
            print(f"  {task_type.value:12} → {model_info['model']}")
|
|
618
|
+
|
|
619
|
+
def _show_config(self) -> None:
    """Show current configuration in a user-friendly format.

    Prints a short, colorized summary (mode, server, version, safety,
    memory, agent mode) rather than raw JSON; see _show_config_raw for
    the full dump.
    """
    from . import __version__

    # ANSI escape codes for terminal styling
    C = '\033[36m'  # Cyan
    B = '\033[1m'   # Bold
    G = '\033[32m'  # Green
    Y = '\033[33m'  # Yellow
    D = '\033[2m'   # Dim
    R = '\033[0m'   # Reset

    print(f"\n{B}NC1709 Configuration{R}\n")

    # Connection info
    if self.remote_client:
        print(f"  {C}Mode{R}      Remote (Cloud)")
        print(f"  {C}Server{R}    {self.remote_url}")
    else:
        print(f"  {C}Mode{R}      Local (Ollama)")
        ollama_url = self.config.get("ollama.base_url", "http://localhost:11434")
        print(f"  {C}Ollama{R}    {ollama_url}")

    print(f"  {C}Version{R}   {__version__}")

    # Safety settings (simplified): "On" only when both confirmations are enabled
    confirm_writes = self.config.get("safety.confirm_writes", True)
    confirm_cmds = self.config.get("safety.confirm_commands", True)
    safety_status = f"{G}On{R}" if (confirm_writes and confirm_cmds) else f"{Y}Partial{R}"
    print(f"  {C}Safety{R}    {safety_status}")

    # Memory
    memory_enabled = self.config.get("memory.enabled", False)
    memory_status = f"{G}Enabled{R}" if memory_enabled else f"{D}Disabled{R}"
    print(f"  {C}Memory{R}    {memory_status}")

    # Agent mode
    agent_status = f"{G}Enabled{R}" if self._agent_mode else f"{D}Disabled{R}"
    print(f"  {C}Agent Mode{R} {agent_status}")

    print(f"\n{D}Config file: {self.config.config_path}{R}")
    print(f"{D}Use '/config raw' for full JSON, '/config edit' to modify{R}\n")
|
|
660
|
+
|
|
661
|
+
def _show_config_raw(self) -> None:
    """Dump the complete configuration as pretty-printed JSON (advanced users)."""
    import json

    serialized = json.dumps(self.config.config, indent=2)
    print("\nFull Configuration (JSON):\n")
    print(serialized)
    print(f"\nConfig file: {self.config.config_path}")
|
|
667
|
+
|
|
668
|
+
def _run_shell(self, resume_session: Optional[str] = None) -> int:
    """Run interactive shell mode.

    Main REPL: starts (or resumes) a session, then loops reading user
    input and dispatching it. Input starting with '/' is treated as a
    slash command; everything else (and any unrecognized command) is
    forwarded to the AI via _process_request. Branch order below is
    significant: exact-match commands must be tested before their
    startswith() prefixes (e.g. "config raw" before "config ",
    "lint fix" before "lint fix " before "lint ").

    Args:
        resume_session: Session ID to resume

    Returns:
        Exit code (always 0; per-command errors are reported and the
        loop continues)
    """
    self._print_banner()

    # Initialize or resume session
    if self.session_manager:
        if resume_session:
            session = self.session_manager.load_session(resume_session)
            if session:
                self.session_manager.current_session = session
                print(f"📂 Resumed session: {session.name} ({len(session.messages)} messages)")
            else:
                # Unknown ID: fall back to a fresh session rather than aborting.
                print(f"⚠️ Session '{resume_session}' not found, starting new session")
                self.session_manager.start_session(project_path=str(Path.cwd()))
        else:
            self.session_manager.start_session(project_path=str(Path.cwd()))
            print(f"📝 Started new session: {self.session_manager.current_session.id}")

    print("\nType /help for commands, or just describe what you want.\n")

    # Set up prompt_toolkit with slash command completion
    prompt_session = self._create_prompt_session()

    while self.running:
        try:
            # Get user input with styled prompt and autocomplete
            prompt = self._get_user_input(prompt_session)

            if not prompt:
                continue

            # Normalize command (remove leading / for slash commands)
            cmd = prompt[1:] if prompt.startswith('/') else prompt
            cmd_lower = cmd.lower()

            # Handle special commands. Session is persisted before exiting.
            if cmd_lower in ["exit", "quit", "q"]:
                if self.session_manager and self.session_manager.current_session:
                    self.session_manager.save_session(self.session_manager.current_session)
                    info(f"Session saved: {self.session_manager.current_session.id}")
                success("Goodbye!")
                break

            if cmd_lower == "help":
                self._print_help()
                continue

            if cmd_lower == "clear":
                if self.llm:
                    self.llm.clear_history()
                    success("Conversation history cleared")
                continue

            if cmd_lower == "history":
                self._show_history()
                continue

            if cmd_lower == "version":
                self._print_version()
                continue

            # Config: exact "config", then "config raw", then any "config <...>".
            if cmd_lower == "config":
                self._show_config()
                continue

            if cmd_lower == "config raw":
                self._show_config_raw()
                continue

            if cmd_lower.startswith("config "):
                self._handle_config_command(cmd)
                continue

            # Session management commands
            if cmd_lower == "sessions":
                self._list_sessions()
                continue

            if cmd_lower == "save":
                if self.session_manager and self.session_manager.current_session:
                    self.session_manager.save_session(self.session_manager.current_session)
                    success(f"Session saved: {self.session_manager.current_session.id}")
                else:
                    warning("No active session to save")
                continue

            # NOTE: query is sliced from cmd (not cmd_lower) to preserve case.
            if cmd_lower.startswith("search "):
                query = cmd[7:].strip()
                self._search_code(query)
                continue

            if cmd_lower == "index":
                self._index_project()
                continue

            # Plugin commands
            if cmd_lower == "plugins":
                self._list_plugins()
                continue

            if cmd_lower.startswith("git "):
                self._run_plugin_action(f"git:{cmd[4:].strip()}")
                continue

            if cmd_lower.startswith("docker "):
                self._run_plugin_action(f"docker:{cmd[7:].strip()}")
                continue

            # MCP commands
            if cmd_lower == "mcp":
                self._mcp_show_status()
                continue

            if cmd_lower == "mcp tools":
                self._mcp_list_tools()
                continue

            if cmd_lower.startswith("mcp call "):
                tool_spec = cmd[9:].strip()
                self._mcp_call_tool_interactive(tool_spec)
                continue

            # Agent mode commands
            if cmd_lower in ["agent", "agent on"]:
                if HAS_AGENT:
                    self._agent_mode = True
                    success("Agent mode enabled. Autonomous tool execution active.")
                else:
                    error("Agent module not available")
                continue

            if cmd_lower == "agent off":
                self._agent_mode = False
                info("Agent mode disabled. Using standard reasoning engine.")
                continue

            # Cognitive system commands
            if cmd_lower in ["brain", "brain status"]:
                self._brain_show_status()
                continue

            if cmd_lower == "brain suggest":
                self._brain_show_suggestions()
                continue

            if cmd_lower == "brain index":
                self._brain_index_project()
                continue

            if cmd_lower == "brain insights":
                self._brain_show_insights()
                continue

            # Checkpoint commands
            if cmd_lower in ["rewind", "undo"]:
                self._rewind_checkpoint()
                continue

            if cmd_lower in ["forward", "redo"]:
                self._forward_checkpoint()
                continue

            if cmd_lower == "checkpoints":
                self._list_checkpoints()
                continue

            # Git autocommit commands
            if cmd_lower == "autocommit":
                self._toggle_autocommit()
                continue

            if cmd_lower == "autocommit on":
                self._set_autocommit(True)
                continue

            if cmd_lower == "autocommit off":
                self._set_autocommit(False)
                continue

            if cmd_lower == "agent tools":
                self._show_agent_tools()
                continue

            if cmd_lower == "agent status":
                self._show_agent_status()
                continue

            # Model Registry commands
            if cmd_lower in ["models", "models list"]:
                self._show_models()
                continue

            if cmd_lower == "models detect":
                self._detect_models()
                continue

            if cmd_lower == "models recommend":
                self._recommend_models()
                continue

            # Code action commands (these get passed to AI with context)
            if cmd_lower.startswith("fix "):
                file_path = cmd[4:].strip()
                self._run_auto_fix(file_path)
                continue

            if cmd_lower.startswith("test "):
                file_path = cmd[5:].strip()
                self._run_test_generator(file_path)
                continue

            # Quick commands
            if cmd_lower.startswith("run "):
                shell_cmd = cmd[4:].strip()
                self._quick_run_command(shell_cmd)
                continue

            if cmd_lower == "web":
                self._run_web_dashboard()
                continue

            # Custom commands
            if cmd_lower == "commands":
                self._list_custom_commands()
                continue

            # Image input commands
            if cmd_lower.startswith("image "):
                image_path = cmd[6:].strip()
                self._add_image(image_path)
                continue

            if cmd_lower == "screenshot":
                self._capture_screenshot()
                continue

            if cmd_lower == "paste":
                self._paste_clipboard_image()
                continue

            if cmd_lower == "images":
                self._list_pending_images()
                continue

            if cmd_lower == "clear-images":
                self._clear_pending_images()
                continue

            # Plan mode commands
            if cmd_lower == "plan":
                self._enter_plan_mode()
                continue

            if cmd_lower == "plan approve":
                self._approve_plan()
                continue

            if cmd_lower == "plan reject":
                self._reject_plan()
                continue

            if cmd_lower == "plan show":
                self._show_plan()
                continue

            if cmd_lower == "plan exit":
                self._exit_plan_mode()
                continue

            # GitHub/PR commands
            if cmd_lower == "pr":
                self._create_pr_interactive()
                continue

            if cmd_lower == "pr list":
                self._list_prs()
                continue

            if cmd_lower.startswith("pr view "):
                pr_num = cmd[8:].strip()
                self._view_pr(pr_num)
                continue

            if cmd_lower == "issues":
                self._list_issues()
                continue

            if cmd_lower.startswith("gh "):
                gh_cmd = cmd[3:].strip()
                self._run_gh_command(gh_cmd)
                continue

            # Linting commands: exact matches before prefix matches.
            if cmd_lower == "lint":
                self._run_lint()
                continue

            if cmd_lower == "lint linters":
                self._list_linters()
                continue

            if cmd_lower.startswith("lint file "):
                file_path = cmd[10:].strip()
                self._lint_file(file_path)
                continue

            if cmd_lower == "lint fix":
                self._lint_fix()
                continue

            if cmd_lower.startswith("lint fix "):
                target = cmd[9:].strip()
                self._lint_fix(target)
                continue

            if cmd_lower.startswith("lint "):
                # Lint specific target
                target = cmd[5:].strip()
                self._run_lint(target=target)
                continue

            # Check if this is a custom command (try to execute it)
            if HAS_CUSTOM_COMMANDS:
                custom_content = execute_custom_command(cmd)
                if custom_content:
                    # Execute the custom command by passing its content as a prompt
                    info(f"Running custom command: /{cmd}")
                    self._process_request(custom_content)
                    continue

            # Process the request (either a plain prompt or unrecognized command)
            self._process_request(prompt)

        except KeyboardInterrupt:
            # Ctrl-C aborts the current action but keeps the shell alive.
            warning("\nUse 'exit' to quit.")
            continue

        except Exception as e:
            error(f"Error: {e}")
            if self.config.get("ui.verbose"):
                import traceback
                traceback.print_exc()

    return 0
|
|
1020
|
+
def _create_prompt_session(self):
    """Build the prompt_toolkit session used by the interactive shell.

    Wires up in-memory history, history-based auto-suggestions, and —
    when the slash_commands module is importable — tab completion for
    slash commands.

    Returns:
        PromptSession configured with completions and styling
    """
    from prompt_toolkit import PromptSession
    from prompt_toolkit.history import InMemoryHistory
    from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
    from prompt_toolkit.styles import Style

    # Slash-command completion is optional; run without it if missing.
    try:
        from .slash_commands import SlashCommandCompleter
        slash_completer = SlashCommandCompleter()
    except ImportError:
        slash_completer = None

    # Color scheme for the completion popup menu.
    completion_colors = {
        'completion-menu.completion': 'bg:#333333 #ffffff',
        'completion-menu.completion.current': 'bg:#00aaaa #ffffff bold',
        'completion-menu.meta.completion': 'bg:#333333 #888888',
        'completion-menu.meta.completion.current': 'bg:#00aaaa #ffffff',
        'scrollbar.background': 'bg:#333333',
        'scrollbar.button': 'bg:#666666',
    }

    return PromptSession(
        history=InMemoryHistory(),
        auto_suggest=AutoSuggestFromHistory(),
        completer=slash_completer,
        complete_while_typing=True,  # Show completions as you type
        style=Style.from_dict(completion_colors),
        multiline=False,
    )
|
1056
|
+
def _get_user_input(self, session) -> str:
    """Prompt the user once and return their stripped input.

    EOF (Ctrl-D) is translated into the "exit" command; Ctrl-C yields
    an empty string so the shell loop simply re-prompts.

    Args:
        session: PromptSession instance

    Returns:
        User input string (stripped)
    """
    from prompt_toolkit.formatted_text import HTML

    # Create colored prompt
    styled_prompt = HTML('<cyan>❯</cyan> <bold>nc1709></bold> ')

    try:
        raw = session.prompt(styled_prompt)
    except EOFError:
        return "exit"
    except KeyboardInterrupt:
        return ""
    return raw.strip()
|
|
1077
|
+
def _quick_run_command(self, cmd: str) -> None:
    """Execute a shell command (60s cap) and echo its output.

    Args:
        cmd: Shell command to run
    """
    import subprocess

    print(f"\n{Color.DIM}$ {cmd}{Color.RESET}\n")

    try:
        completed = subprocess.run(
            cmd,
            shell=True,
            capture_output=True,
            text=True,
            timeout=60,
        )
    except subprocess.TimeoutExpired:
        error("Command timed out after 60 seconds")
        return
    except Exception as exc:
        error(f"Error running command: {exc}")
        return

    # Echo captured streams; stderr is highlighted in yellow.
    if completed.stdout:
        print(completed.stdout)
    if completed.stderr:
        print(f"{Color.YELLOW}{completed.stderr}{Color.RESET}")

    if completed.returncode == 0:
        success("Command completed")
    else:
        warning(f"Command exited with code {completed.returncode}")
|
|
1111
|
+
def _run_command(self, prompt: str) -> int:
    """Execute a single one-shot request (non-interactive mode).

    Args:
        prompt: User's prompt

    Returns:
        Exit code: 0 on success, 1 on any error
    """
    try:
        self._process_request(prompt)
    except Exception as exc:
        error(f"Error: {exc}")
        return 1
    return 0
|
1127
|
+
def _process_request(self, prompt: str) -> None:
    """Dispatch a user request to the most capable available backend.

    Priority order: remote server (LLM remote, tools local), then the
    local agent, then the cognitive architecture, then the plain
    reasoning engine.

    Args:
        prompt: User's prompt
    """
    # Remote mode with LOCAL tool execution: the server only provides
    # LLM thinking; tools run on the user's machine.
    if self.remote_client:
        self._process_request_remote_agent(prompt)
        return

    # Agent mode - use the agent for tool execution
    if self._agent_mode and HAS_AGENT:
        self._process_request_agent(prompt)
        return

    # Local mode - use the 5-layer cognitive architecture if available
    if self.cognitive_system and HAS_COGNITIVE:
        self._process_request_cognitive(prompt)
        return

    # Fallback: classify the task and hand it to the reasoning engine.
    task_type = TaskClassifier.classify(prompt)
    request_context = {
        "cwd": str(Path.cwd()),
        "task_type": task_type.value,
    }
    # Print response with text wrapping
    print_response(self.reasoning_engine.process_request(prompt, request_context))
|
1162
|
+
def _process_request_cognitive(self, prompt: str) -> None:
    """Process a user request through the 5-layer cognitive architecture.

    This is the core NC1709 differentiator:
    - Layer 1: Intelligent Router - routes to best model
    - Layer 2: Deep Context - provides codebase understanding
    - Layer 3: Multi-Agent Council - experts collaborate on complex tasks
    - Layer 4: Learning Core - learns from user patterns
    - Layer 5: Anticipation - predicts needs before asked

    Args:
        prompt: User's prompt
    """
    from .cli_ui import thinking, info

    thinking("Processing with cognitive architecture...")

    cog_request = CognitiveRequest(
        prompt=prompt,
        context={"cwd": str(Path.cwd())},
        stream=False,
    )
    result = self.cognitive_system.process(cog_request)

    # Main answer, wrapped for the terminal.
    print_response(result.content)

    # One-line metadata trailer: category, then either the council
    # roster or the single model that answered, plus timing if known.
    meta = f"[{result.category}]"
    if result.council_used and result.council_agents:
        meta += f" Council: {', '.join(result.council_agents)}"
    else:
        meta += f" {result.model_used}"
    if result.processing_time_ms:
        meta += f" ({result.processing_time_ms}ms)"
    info(meta)

    # Proactive suggestions, if the anticipation layer produced any.
    if result.suggestions:
        print()
        info("💡 Suggestions:")
        for hint in result.suggestions[:3]:
            score = hint.get('confidence', 0)
            marker = "🔥" if score > 0.8 else "💭" if score > 0.5 else "💡"
            print(f" {marker} {hint['title']}")
            if hint.get('description'):
                print(f" {hint['description'][:80]}...")
|
1215
|
+
def _process_request_remote_agent(self, prompt: str) -> None:
    """Process a user request using remote LLM but LOCAL tool execution.

    This is the correct architecture:
    - Server: Only runs LLM (thinking/reasoning)
    - Client: Executes all tools locally on user's machine

    Loop: ask the server for a response, parse any tool calls out of
    it, run those tools here (with per-tool approval for sensitive
    ones), feed the results back, and repeat until the LLM answers
    without tool calls or max_iterations is hit.

    Args:
        prompt: User's prompt
    """
    import json
    import re

    # Initialize local tool registry for executing tools (lazily, once
    # per CLI instance).
    if not hasattr(self, '_local_registry') or self._local_registry is None:
        from .agent.tools.base import ToolRegistry
        from .agent.tools.file_tools import register_file_tools
        from .agent.tools.search_tools import register_search_tools
        from .agent.tools.bash_tool import register_bash_tools
        from .agent.tools.web_tools import register_web_tools

        self._local_registry = ToolRegistry()
        register_file_tools(self._local_registry)
        register_search_tools(self._local_registry)
        register_bash_tools(self._local_registry)
        register_web_tools(self._local_registry)

    # Get conversation history from session (if available)
    messages = []
    if self.session_manager and self.session_manager.current_session:
        # Load previous messages for context (last 20 messages)
        messages = self.session_manager.get_current_history(limit=20)

    # Add current user prompt
    messages.append({"role": "user", "content": prompt})

    # Save user message to session
    if self.session_manager:
        self.session_manager.add_message("user", prompt, auto_save=True)

    # Hard cap on think->tool->think round-trips.
    max_iterations = 50
    iteration = 0
    tool_history = []
    # NOTE(review): final_response is never assigned or read below —
    # candidate for removal.
    final_response = ""

    print()  # Add spacing

    while iteration < max_iterations:
        iteration += 1
        thinking(f"Thinking... (iteration {iteration})")

        try:
            # Call remote server for LLM response (NO tool execution on server)
            result = self.remote_client.agent_chat(
                messages=messages,
                cwd=str(Path.cwd()),
                tools=list(self._local_registry.list_names())
            )

            response = result.get("response", "")

            # Parse tool calls from response
            tool_calls = self._parse_tool_calls_from_response(response)

            if not tool_calls:
                # No tool calls - LLM is done, show final response
                # Clean the response (remove any tool markers)
                clean_response = self._clean_response_text(response)
                print_response(clean_response)

                # Save assistant response to session for memory
                if self.session_manager:
                    self.session_manager.add_message("assistant", clean_response, auto_save=True)

                # Show tool execution summary (last 5 only)
                if tool_history:
                    print(f"\n{Color.DIM}Tools executed: {len(tool_history)}{Color.RESET}")
                    for entry in tool_history[-5:]:
                        icon = Icons.SUCCESS if entry['success'] else Icons.FAILURE
                        print(f" {icon} {entry['tool']}({entry['target']})")

                # Flush any remaining files to index
                self._flush_index_queue()

                return

            # Execute tools LOCALLY
            all_results = []
            for tool_call in tool_calls:
                tool_name = tool_call["name"]
                tool_params = tool_call["parameters"]

                tool = self._local_registry.get(tool_name)
                if not tool:
                    # Unknown tool name: report back to the LLM instead
                    # of failing the whole request.
                    result_text = f"Error: Unknown tool '{tool_name}'"
                    all_results.append(f"[{tool_name}] {result_text}")
                    tool_history.append({"tool": tool_name, "target": "?", "success": False})
                    continue

                # Get target for display
                target = tool._get_target(**tool_params) if hasattr(tool, '_get_target') else str(tool_params)[:30]

                # Check if tool needs approval
                if self._local_registry.needs_approval(tool_name):
                    print(f"\n{Color.YELLOW}Tool requires approval:{Color.RESET}")
                    print(f" {Color.BOLD}{tool_name}{Color.RESET}({Color.CYAN}{target}{Color.RESET})")
                    if tool_params:
                        print(f" Parameters: {json.dumps(tool_params, indent=2)[:200]}")

                    approval = input(f"\n{Color.BOLD}Allow?{Color.RESET} [y/N/always]: ").strip().lower()
                    if approval == "always":
                        # Whitelist this tool for the rest of the session.
                        self._local_registry.approve_for_session(tool_name)
                    elif approval not in ["y", "yes"]:
                        result_text = "Tool execution denied by user"
                        all_results.append(f"[{tool_name}] {result_text}")
                        tool_history.append({"tool": tool_name, "target": target, "success": False})
                        continue

                # Execute tool locally
                info(f"Executing: {tool_name}({target})")

                try:
                    tool_result = tool.run(**tool_params)
                    if tool_result.success:
                        result_text = tool_result.output
                        success(f"{tool_name} completed")
                        tool_history.append({"tool": tool_name, "target": target, "success": True})

                        # Auto-index files when Read tool is used
                        if tool_name == "Read" and hasattr(self, '_user_id'):
                            self._queue_file_for_indexing(
                                file_path=tool_params.get("file_path", target),
                                content=result_text
                            )
                    else:
                        result_text = f"Error: {tool_result.error}"
                        warning(f"{tool_name} failed: {tool_result.error}")
                        tool_history.append({"tool": tool_name, "target": target, "success": False})

                    all_results.append(f"[{tool_name}({target})] {result_text}")

                except Exception as e:
                    # Tool raised instead of returning a failure result.
                    result_text = f"Exception: {str(e)}"
                    error(f"{tool_name} error: {e}")
                    all_results.append(f"[{tool_name}] {result_text}")
                    tool_history.append({"tool": tool_name, "target": target, "success": False})

            # Add assistant response and tool results to conversation
            # so the next iteration's LLM call sees what happened.
            messages.append({"role": "assistant", "content": response})
            messages.append({
                "role": "user",
                "content": f"Tool results:\n\n" + "\n\n".join(all_results) + "\n\nContinue with the task based on these results."
            })

        except Exception as e:
            error(f"Remote request failed: {e}")
            if self.config.get("ui.verbose"):
                import traceback
                traceback.print_exc()
            return

    warning(f"Reached maximum iterations ({max_iterations})")
|
1378
|
+
def _parse_tool_calls_from_response(self, response: str) -> list:
|
|
1379
|
+
"""Parse tool calls from LLM response
|
|
1380
|
+
|
|
1381
|
+
Args:
|
|
1382
|
+
response: LLM response text
|
|
1383
|
+
|
|
1384
|
+
Returns:
|
|
1385
|
+
List of tool calls [{"name": ..., "parameters": {...}}, ...]
|
|
1386
|
+
"""
|
|
1387
|
+
import json
|
|
1388
|
+
import re
|
|
1389
|
+
|
|
1390
|
+
tool_calls = []
|
|
1391
|
+
|
|
1392
|
+
# Pattern 1: ```tool ... ``` blocks
|
|
1393
|
+
pattern = r"```tool\s*\n?(.*?)\n?```"
|
|
1394
|
+
matches = re.findall(pattern, response, re.DOTALL)
|
|
1395
|
+
|
|
1396
|
+
for match in matches:
|
|
1397
|
+
try:
|
|
1398
|
+
data = json.loads(match.strip())
|
|
1399
|
+
if "tool" in data:
|
|
1400
|
+
tool_calls.append({
|
|
1401
|
+
"name": data["tool"],
|
|
1402
|
+
"parameters": data.get("parameters", {})
|
|
1403
|
+
})
|
|
1404
|
+
except json.JSONDecodeError:
|
|
1405
|
+
continue
|
|
1406
|
+
|
|
1407
|
+
# Pattern 2: JSON objects with "tool" key
|
|
1408
|
+
json_pattern = r'\{[^{}]*"tool"\s*:\s*"[^"]+"\s*[^{}]*\}'
|
|
1409
|
+
json_matches = re.findall(json_pattern, response)
|
|
1410
|
+
|
|
1411
|
+
for match in json_matches:
|
|
1412
|
+
# Don't duplicate
|
|
1413
|
+
if any(match in tc.get("raw", "") for tc in tool_calls):
|
|
1414
|
+
continue
|
|
1415
|
+
try:
|
|
1416
|
+
data = json.loads(match)
|
|
1417
|
+
if "tool" in data:
|
|
1418
|
+
tool_calls.append({
|
|
1419
|
+
"name": data["tool"],
|
|
1420
|
+
"parameters": data.get("parameters", {}),
|
|
1421
|
+
"raw": match
|
|
1422
|
+
})
|
|
1423
|
+
except json.JSONDecodeError:
|
|
1424
|
+
continue
|
|
1425
|
+
|
|
1426
|
+
return tool_calls
|
|
1427
|
+
|
|
1428
|
+
def _clean_response_text(self, response: str) -> str:
|
|
1429
|
+
"""Remove tool call markers from final response
|
|
1430
|
+
|
|
1431
|
+
Args:
|
|
1432
|
+
response: Raw LLM response
|
|
1433
|
+
|
|
1434
|
+
Returns:
|
|
1435
|
+
Cleaned response text
|
|
1436
|
+
"""
|
|
1437
|
+
import re
|
|
1438
|
+
|
|
1439
|
+
# Remove tool blocks
|
|
1440
|
+
response = re.sub(r"```tool\s*\n?.*?\n?```", "", response, flags=re.DOTALL)
|
|
1441
|
+
# Remove JSON tool calls
|
|
1442
|
+
response = re.sub(r'\{[^{}]*"tool"\s*:\s*"[^"]+"\s*[^{}]*\}', "", response)
|
|
1443
|
+
return response.strip()
|
|
1444
|
+
|
|
1445
|
+
def _queue_file_for_indexing(self, file_path: str, content: str) -> None:
|
|
1446
|
+
"""Queue a file for indexing on the server
|
|
1447
|
+
|
|
1448
|
+
Args:
|
|
1449
|
+
file_path: Path to the file
|
|
1450
|
+
content: File content
|
|
1451
|
+
"""
|
|
1452
|
+
if not hasattr(self, '_files_to_index'):
|
|
1453
|
+
self._files_to_index = []
|
|
1454
|
+
if not hasattr(self, '_indexed_files'):
|
|
1455
|
+
self._indexed_files = set()
|
|
1456
|
+
|
|
1457
|
+
# Don't re-index same file in this session
|
|
1458
|
+
if file_path in self._indexed_files:
|
|
1459
|
+
return
|
|
1460
|
+
|
|
1461
|
+
# Determine language from extension
|
|
1462
|
+
ext_to_lang = {
|
|
1463
|
+
'.py': 'python', '.js': 'javascript', '.ts': 'typescript',
|
|
1464
|
+
'.jsx': 'javascript', '.tsx': 'typescript', '.go': 'go',
|
|
1465
|
+
'.rs': 'rust', '.java': 'java', '.c': 'c', '.cpp': 'cpp',
|
|
1466
|
+
'.h': 'c', '.hpp': 'cpp', '.rb': 'ruby', '.php': 'php',
|
|
1467
|
+
'.swift': 'swift', '.kt': 'kotlin', '.scala': 'scala',
|
|
1468
|
+
'.md': 'markdown', '.json': 'json', '.yaml': 'yaml',
|
|
1469
|
+
'.yml': 'yaml', '.toml': 'toml', '.html': 'html',
|
|
1470
|
+
'.css': 'css', '.sql': 'sql', '.sh': 'shell',
|
|
1471
|
+
}
|
|
1472
|
+
ext = Path(file_path).suffix.lower()
|
|
1473
|
+
language = ext_to_lang.get(ext, 'text')
|
|
1474
|
+
|
|
1475
|
+
self._files_to_index.append({
|
|
1476
|
+
"path": file_path,
|
|
1477
|
+
"content": content[:50000], # Limit content size
|
|
1478
|
+
"language": language
|
|
1479
|
+
})
|
|
1480
|
+
self._indexed_files.add(file_path)
|
|
1481
|
+
|
|
1482
|
+
# Batch index every 5 files
|
|
1483
|
+
if len(self._files_to_index) >= 5:
|
|
1484
|
+
self._flush_index_queue()
|
|
1485
|
+
|
|
1486
|
+
def _flush_index_queue(self) -> None:
|
|
1487
|
+
"""Send queued files to server for indexing"""
|
|
1488
|
+
if not hasattr(self, '_files_to_index') or not self._files_to_index:
|
|
1489
|
+
return
|
|
1490
|
+
|
|
1491
|
+
if not self.remote_client or not hasattr(self, '_user_id'):
|
|
1492
|
+
return
|
|
1493
|
+
|
|
1494
|
+
try:
|
|
1495
|
+
project_name = Path.cwd().name
|
|
1496
|
+
|
|
1497
|
+
result = self.remote_client.index_code(
|
|
1498
|
+
user_id=self._user_id,
|
|
1499
|
+
files=self._files_to_index,
|
|
1500
|
+
project_name=project_name
|
|
1501
|
+
)
|
|
1502
|
+
|
|
1503
|
+
# Clear queue on success
|
|
1504
|
+
self._files_to_index = []
|
|
1505
|
+
|
|
1506
|
+
except Exception as e:
|
|
1507
|
+
# Silently fail - indexing is best-effort
|
|
1508
|
+
pass
|
|
1509
|
+
|
|
1510
|
+
def _process_request_agent(self, prompt: str) -> None:
    """Process a user request via the local autonomous agent.

    Runs the agent, prints a short summary of the tools it executed
    (last five), then the agent's final answer.

    Args:
        prompt: User's prompt
    """
    if not self.agent:
        error("Agent not available. Check LLM configuration.")
        return

    print()  # Add spacing
    thinking("Processing with agent...")

    try:
        answer = self.agent.run(prompt)

        # Show tool execution history
        executed = self.agent.get_tool_history()
        if executed:
            print(f"\n{Color.DIM}Tools executed: {len(executed)}{Color.RESET}")
            for record in executed[-5:]:  # Show last 5
                status_icon = Icons.SUCCESS if record['success'] else Icons.FAILURE
                elapsed = f"{record['duration_ms']:.0f}ms" if record['duration_ms'] else ""
                print(f" {status_icon} {record['tool']}({record['target']}) {Color.DIM}{elapsed}{Color.RESET}")

        # Print final response with text wrapping
        print_response(answer)

    except Exception as exc:
        error(f"Agent error: {exc}")
        if self.config.get("ui.verbose"):
            import traceback
            traceback.print_exc()
|
|
1544
|
+
    def _show_agent_tools(self) -> None:
        """Show available agent tools, grouped by category with permission icons.

        Requires the agent module to be importable and the agent to be
        initialized; prints a warning/error and returns early otherwise.
        """
        if not HAS_AGENT:
            error("Agent module not available")
            return

        if not self.agent:
            warning("Agent not initialized. Enable agent mode first with 'agent on'")
            return

        print(f"\n{Color.BOLD}🔧 Agent Tools{Color.RESET}")
        print("=" * 60)

        # Group tools by category (tools without a 'category' attr go to "other")
        tools_by_category = {}
        for tool_name in self.agent.registry.list_names():
            tool = self.agent.registry.get(tool_name)
            if tool:
                category = getattr(tool, 'category', 'other')
                if category not in tools_by_category:
                    tools_by_category[category] = []
                tools_by_category[category].append(tool)

        for category, tools in sorted(tools_by_category.items()):
            print(f"\n{Color.CYAN}{category.title()}{Color.RESET}")
            for tool in tools:
                # Map the tool's permission level to a colored one-char icon
                perm = self.agent.registry.get_permission(tool.name)
                perm_icon = {
                    "auto": f"{Color.GREEN}✓{Color.RESET}",
                    "ask": f"{Color.YELLOW}?{Color.RESET}",
                    "deny": f"{Color.RED}✗{Color.RESET}",
                }.get(perm.value, "?")
                print(f" {perm_icon} {Color.BOLD}{tool.name}{Color.RESET}")
                # Truncate long descriptions to keep one line per tool
                print(f" {Color.DIM}{tool.description[:60]}...{Color.RESET}" if len(tool.description) > 60 else f" {Color.DIM}{tool.description}{Color.RESET}")

        print(f"\n{Color.DIM}Permission key: ✓=auto, ?=ask, ✗=deny{Color.RESET}")
        print(f"Total tools: {len(self.agent.registry.list_names())}")
    def _show_agent_status(self) -> None:
        """Show agent status and tool history.

        Prints the agent-mode flag, then (if the agent object exists) its
        state, iteration count, tool count, and the last 10 tool executions.
        """
        if not HAS_AGENT:
            error("Agent module not available")
            return

        print(f"\n{Color.BOLD}🤖 Agent Status{Color.RESET}")
        print("=" * 60)

        # Mode status
        mode_status = f"{Color.GREEN}Enabled{Color.RESET}" if self._agent_mode else f"{Color.DIM}Disabled{Color.RESET}"
        print(f"\nAgent Mode: {mode_status}")

        # NOTE(review): this method reads self._agent while
        # _process_request_agent uses self.agent — presumably self.agent is a
        # property over self._agent; confirm they refer to the same object.
        if self._agent:
            # Agent state
            print(f"State: {self._agent.state.value}")
            print(f"Iteration: {self._agent.iteration_count}")
            print(f"Registered Tools: {len(self._agent.registry.list_names())}")

            # Tool history (most recent 10 entries)
            history = self._agent.get_tool_history()
            if history:
                print(f"\n{Color.BOLD}Recent Tool Executions:{Color.RESET}")
                for entry in history[-10:]:
                    icon = Icons.SUCCESS if entry['success'] else Icons.FAILURE
                    duration = f"{entry['duration_ms']:.0f}ms" if entry['duration_ms'] else ""
                    print(f" {icon} {entry['tool']}({entry['target']}) {Color.DIM}{duration}{Color.RESET}")
            else:
                print(f"\n{Color.DIM}No tool executions yet{Color.RESET}")
        else:
            print(f"\n{Color.DIM}Agent not initialized. Use 'agent on' to enable.{Color.RESET}")
    def _show_models(self) -> None:
        """Show Model Registry status and available models.

        Lists all known models grouped by capability (deduplicated by
        ollama name), then the tiering configuration when the optional
        performance.tiering module is importable.
        """
        try:
            # NOTE(review): ModelCapability and ModelManager are imported but
            # not referenced below — possibly kept so the ImportError probe
            # fails when any registry symbol is missing; confirm before removing.
            from .models import (
                get_all_models,
                get_model_spec,
                ModelCapability,
                ModelManager,
            )
        except ImportError:
            error("Model Registry not available. Install nc1709 with all features.")
            return

        print(f"\n{Color.BOLD}Model Registry{Color.RESET}")
        print("=" * 70)

        # Get all known models
        all_models = get_all_models()
        print(f"\n{Color.GREEN}Known Models:{Color.RESET} {len(all_models)}")

        # Group specs by capability value; one spec may appear in many groups
        capability_groups = {}
        for model_name in all_models:
            spec = get_model_spec(model_name)
            if spec:
                for cap in spec.capabilities:
                    if cap.value not in capability_groups:
                        capability_groups[cap.value] = []
                    capability_groups[cap.value].append(spec)

        # Icons per capability (placeholders — all blank at the moment)
        cap_icons = {
            "code_generation": " ",
            "reasoning": " ",
            "fast_inference": " ",
            "long_context": " ",
            "instruction_following": " ",
            "math": " ",
            "vision": " ",
            "function_calling": " ",
        }

        # Only the four headline capabilities are displayed
        for cap_name in ["code_generation", "reasoning", "fast_inference", "long_context"]:
            if cap_name in capability_groups:
                models = capability_groups[cap_name]
                icon = cap_icons.get(cap_name, " ")
                print(f"\n{Color.CYAN}{icon} {cap_name.replace('_', ' ').title()}{Color.RESET}")
                # Deduplicate: the same model can carry several capability tags
                seen = set()
                for spec in models:
                    if spec.ollama_name not in seen:
                        seen.add(spec.ollama_name)
                        ctx = f"{spec.context_window // 1024}K" if spec.context_window else "?"
                        print(f" {Color.BOLD}{spec.ollama_name:<30}{Color.RESET} {Color.DIM}ctx:{ctx}{Color.RESET}")

        # Show tiering configuration (optional module — skip silently if absent)
        try:
            from .performance.tiering import DEFAULT_TIERS, ModelTier
            print(f"\n{Color.BOLD}Model Tiering Configuration:{Color.RESET}")
            for tier, config in DEFAULT_TIERS.items():
                if tier != ModelTier.COUNCIL:
                    tier_name = tier.value.replace("_", " ").title()
                    model_name = config.model.replace("ollama/", "")
                    print(f" {tier_name:<12} -> {Color.GREEN}{model_name}{Color.RESET}")
        except ImportError:
            pass

        print(f"\n{Color.DIM}Use '/models detect' to auto-detect from Ollama{Color.RESET}")
        print(f"{Color.DIM}Use '/models recommend' to get task recommendations{Color.RESET}")
    def _detect_models(self) -> None:
        """Auto-detect models from Ollama.

        Drives ModelDetector's async API on a manually managed event loop
        and prints each model's capabilities, context size, and format.
        """
        try:
            from .models import ModelDetector
            import asyncio
        except ImportError:
            error("Model Registry not available")
            return

        print(f"\n{Color.BOLD}Detecting Models from Ollama...{Color.RESET}")

        detector = ModelDetector()
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a
        # running loop on Python 3.10+; asyncio.run() would be the modern
        # form — confirm no caller depends on reusing an existing loop.
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        try:
            models = loop.run_until_complete(detector.list_available_models())
            if models:
                print(f"\n{Color.GREEN}Found {len(models)} models:{Color.RESET}\n")
                for model_name in models:
                    # One extra awaited call per model to fetch its spec
                    spec = loop.run_until_complete(detector.get_model_spec(model_name))
                    if spec:
                        # Show at most the first three capability tags
                        caps = ", ".join(c.value.replace("_", " ") for c in list(spec.capabilities)[:3])
                        ctx = f"{spec.context_window // 1024}K" if spec.context_window else "?"
                        print(f" {Color.BOLD}{spec.ollama_name:<30}{Color.RESET}")
                        print(f" {Color.DIM}Capabilities: {caps}{Color.RESET}")
                        print(f" {Color.DIM}Context: {ctx}, Format: {spec.prompt_format.value}{Color.RESET}")
            else:
                warning("No models found. Is Ollama running?")
        except Exception as e:
            error(f"Failed to detect models: {e}")
def _recommend_models(self) -> None:
|
|
1719
|
+
"""Get model recommendations for different tasks"""
|
|
1720
|
+
try:
|
|
1721
|
+
from .models import ModelManager
|
|
1722
|
+
except ImportError:
|
|
1723
|
+
error("Model Registry not available")
|
|
1724
|
+
return
|
|
1725
|
+
|
|
1726
|
+
print(f"\n{Color.BOLD}Model Recommendations{Color.RESET}")
|
|
1727
|
+
print("=" * 60)
|
|
1728
|
+
|
|
1729
|
+
manager = ModelManager()
|
|
1730
|
+
tasks = ["coding", "general", "reasoning", "fast"]
|
|
1731
|
+
|
|
1732
|
+
for task in tasks:
|
|
1733
|
+
spec = manager.recommend_model(task)
|
|
1734
|
+
if spec:
|
|
1735
|
+
print(f"\n{Color.CYAN}{task.title()}{Color.RESET}")
|
|
1736
|
+
print(f" Recommended: {Color.GREEN}{spec.ollama_name}{Color.RESET}")
|
|
1737
|
+
score = spec.suitability.get(task, 0)
|
|
1738
|
+
print(f" Suitability: {score:.0%}")
|
|
1739
|
+
settings = manager.get_recommended_settings(spec.ollama_name, task)
|
|
1740
|
+
print(f" Temperature: {settings.get('temperature', 0.7)}")
|
|
1741
|
+
|
|
1742
|
+
print(f"\n{Color.DIM}Use these models via tiering or set model explicitly{Color.RESET}")
|
|
1743
|
+
|
|
1744
|
+
    def _print_auth_screen(self) -> None:
        """Print compact auth screen with API key instructions.

        Shown when no API key is configured; renders the ANSI banner plus
        the two ways to supply a key (flag or environment variable).
        """
        from . import __version__

        # Colors that work on both light and dark terminals
        C = '\033[36m'   # Cyan (works on both)
        B = '\033[1m'    # Bold
        G = '\033[32m'   # Green
        Y = '\033[33m'   # Yellow
        M = '\033[35m'   # Magenta
        R = '\033[0m'    # Reset

        # Single f-string keeps the banner layout exactly as authored
        print(f'''
{B}{C} ███╗ ██╗ ██████╗ ██╗███████╗ ██████╗ █████╗ {R}
{B}{C} ████╗ ██║██╔════╝███║╚════██║██╔═████╗██╔══██╗{R}
{B}{C} ██╔██╗ ██║██║ ╚██║ ██╔╝██║██╔██║╚██████║{R}
{B}{C} ██║╚██╗██║██║ ██║ ██╔╝ ████╔╝██║ ╚═══██║{R}
{B}{C} ██║ ╚████║╚██████╗ ██║ ██║ ╚██████╔╝ █████╔╝{R}
{B}{C} ╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚════╝ {R}
{B}Bring your code to life{R} v{__version__}

{C}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{R}

{B}Your AI coding partner that reads, writes, and
runs code directly on your machine.{R}

{B}{G}✓{R}{B} Generate & edit code {G}✓{R}{B} Run shell commands{R}
{B}{G}✓{R}{B} Debug & fix errors {G}✓{R}{B} Manage files & projects{R}

{B}{C}Languages{R} Python · JavaScript · TypeScript · Go · Rust
{B}{C}Frameworks{R} React · Node.js · FastAPI · Django · Express
{B}{C}DevOps{R} Docker · Kubernetes · Kafka · Redis · Nginx
{B}{C}Workflows{R} CI/CD · Git · Testing · Deployment · APIs

{C}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{R}

{B}{Y}🔐 API Key Required{R}

{B}{C}→{R} nc1709 --api-key "your-api-key"
{B}{C}→{R} export NC1709_API_KEY="your-api-key"

{C}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{R}
{B}{M}📧 Need a key? support@lafzusa.com{R}
{C}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{R}
''')
    def _print_startup_banner(self) -> None:
        """Print startup banner (kept for compatibility).

        Minimal text-only banner; superseded by _print_banner but retained
        so older call sites keep working.
        """
        from . import __version__

        C = '\033[96m'   # Bright Cyan
        W = '\033[97m'   # Bright White
        D = '\033[2m'    # Dim
        R = '\033[0m'    # Reset

        print(f'''
{C} NC1709{R}
{W} Bring your code to life{R}
{D} v{__version__}{R}

{D} Your AI coding partner — reads, writes, and
runs code directly on your machine.{R}
''')
    def _print_banner(self) -> None:
        """Print welcome banner after successful authentication.

        Renders the ASCII-art banner with feature highlights, then runs a
        best-effort update check that must never block or fail startup.
        """
        from . import __version__

        # Colors that work on both light and dark terminals
        C = '\033[36m'   # Cyan
        B = '\033[1m'    # Bold
        G = '\033[32m'   # Green
        Y = '\033[33m'   # Yellow
        D = '\033[2m'    # Dim
        R = '\033[0m'    # Reset

        print(f'''
{B}{C} ███╗ ██╗ ██████╗ ██╗███████╗ ██████╗ █████╗ {R} {G}✓{R} Generate & edit code
{B}{C} ████╗ ██║██╔════╝███║╚════██║██╔═████╗██╔══██╗{R} {G}✓{R} Run shell commands
{B}{C} ██╔██╗ ██║██║ ╚██║ ██╔╝██║██╔██║╚██████║{R} {G}✓{R} Debug & fix errors
{B}{C} ██║╚██╗██║██║ ██║ ██╔╝ ████╔╝██║ ╚═══██║{R} {G}✓{R} Multi-agent system
{B}{C} ██║ ╚████║╚██████╗ ██║ ██║ ╚██████╔╝ █████╔╝{R} {G}✓{R} Learns your style
{B}{C} ╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚════╝ {R}

{B}Bring your code to life{R} {D}v{__version__}{R} {G}●{R} {B}Connected{R}

{D}Python · JS · TS · Go · Rust │ React · FastAPI · Docker · Git{R}

{C}›{R} Describe what you want {C}›{R} Type {Y}/help{R} for commands
''')
        # Check for updates (non-blocking, cached)
        try:
            from .version_check import check_and_notify
            update_msg = check_and_notify()
            if update_msg:
                print(f"{Y}{update_msg}{R}")
        except Exception:
            pass  # Never fail startup due to version check
    def _print_help(self) -> None:
        """Print help message with slash commands.

        Prefers the generated help from slash_commands; falls back to a
        static listing when that module is unavailable. Always appends
        general usage tips afterwards.
        """
        try:
            from .slash_commands import format_help_text
            print(format_help_text())
        except ImportError:
            # Fallback if slash_commands not available
            print(self._get_fallback_help())

        # Additional tips
        print("""
\033[1mUsage Tips:\033[0m

• Type / to see all commands with autocomplete
• Press Tab to complete commands
• Just describe what you want in plain English
• The AI will ask before making changes

\033[36mExamples:\033[0m

• "Read main.py and explain what it does"
• "Create a Python script to fetch data from an API"
• /git status or /docker ps
• /search authentication logic
""")
    def _get_fallback_help(self) -> str:
        """Return static help text used when slash_commands cannot be imported.

        Returns:
            A pre-formatted, ANSI-colored multi-line string listing the
            built-in commands by category.
        """
        return """
\033[1mAvailable Commands:\033[0m

/help Show this help message
/exit Exit NC1709
/clear Clear conversation history
/history Show command history
/config View/modify configuration

\033[36mAgent Mode:\033[0m
/agent on Enable agent mode
/agent off Disable agent mode
/agent tools List available tools
/agent status Show agent status

\033[36mGit:\033[0m
/git status Show git status
/git diff Show changes
/git log Show commit history

\033[36mDocker:\033[0m
/docker ps List containers
/docker logs View logs

\033[36mSearch:\033[0m
/search <q> Search code
/index Index project

\033[36mCognitive:\033[0m
/brain status Show cognitive system status
/brain suggest Get proactive suggestions
/brain index Index project for context awareness
"""
def _show_history(self) -> None:
|
|
1906
|
+
"""Show command execution history"""
|
|
1907
|
+
history = self.executor.get_execution_history(limit=20)
|
|
1908
|
+
|
|
1909
|
+
if not history:
|
|
1910
|
+
print("No command history yet.")
|
|
1911
|
+
return
|
|
1912
|
+
|
|
1913
|
+
print("\nRecent Command History:")
|
|
1914
|
+
print("="*60)
|
|
1915
|
+
|
|
1916
|
+
for i, entry in enumerate(history, 1):
|
|
1917
|
+
status = "✅" if entry["success"] else "❌"
|
|
1918
|
+
print(f"{i}. {status} {entry['command']}")
|
|
1919
|
+
print(f" Time: {entry['timestamp']}")
|
|
1920
|
+
print(f" Exit code: {entry['return_code']}")
|
|
1921
|
+
print()
|
|
1922
|
+
|
|
1923
|
+
def _rewind_checkpoint(self) -> None:
|
|
1924
|
+
"""Rewind to previous checkpoint (undo last file change)"""
|
|
1925
|
+
if not HAS_CHECKPOINTS:
|
|
1926
|
+
warning("Checkpoints module not available")
|
|
1927
|
+
return
|
|
1928
|
+
|
|
1929
|
+
manager = get_checkpoint_manager()
|
|
1930
|
+
|
|
1931
|
+
if not manager.can_rewind():
|
|
1932
|
+
warning("No checkpoints to rewind to")
|
|
1933
|
+
return
|
|
1934
|
+
|
|
1935
|
+
checkpoint = manager.rewind()
|
|
1936
|
+
if checkpoint:
|
|
1937
|
+
files = list(checkpoint.files.keys())
|
|
1938
|
+
file_names = [Path(f).name for f in files]
|
|
1939
|
+
success(f"Rewound to checkpoint: {checkpoint.description}")
|
|
1940
|
+
info(f"Restored {len(files)} file(s): {', '.join(file_names)}")
|
|
1941
|
+
else:
|
|
1942
|
+
error("Failed to rewind")
|
|
1943
|
+
|
|
1944
|
+
def _forward_checkpoint(self) -> None:
|
|
1945
|
+
"""Go forward in checkpoint history (redo after rewind)"""
|
|
1946
|
+
if not HAS_CHECKPOINTS:
|
|
1947
|
+
warning("Checkpoints module not available")
|
|
1948
|
+
return
|
|
1949
|
+
|
|
1950
|
+
manager = get_checkpoint_manager()
|
|
1951
|
+
|
|
1952
|
+
if not manager.can_forward():
|
|
1953
|
+
warning("Already at latest checkpoint")
|
|
1954
|
+
return
|
|
1955
|
+
|
|
1956
|
+
checkpoint = manager.forward()
|
|
1957
|
+
if checkpoint:
|
|
1958
|
+
files = list(checkpoint.files.keys())
|
|
1959
|
+
file_names = [Path(f).name for f in files]
|
|
1960
|
+
success(f"Forward to checkpoint: {checkpoint.description}")
|
|
1961
|
+
info(f"Restored {len(files)} file(s): {', '.join(file_names)}")
|
|
1962
|
+
else:
|
|
1963
|
+
error("Failed to go forward")
|
|
1964
|
+
|
|
1965
|
+
def _list_checkpoints(self) -> None:
|
|
1966
|
+
"""List recent checkpoints"""
|
|
1967
|
+
if not HAS_CHECKPOINTS:
|
|
1968
|
+
warning("Checkpoints module not available")
|
|
1969
|
+
return
|
|
1970
|
+
|
|
1971
|
+
manager = get_checkpoint_manager()
|
|
1972
|
+
checkpoints = manager.list_checkpoints(limit=15)
|
|
1973
|
+
|
|
1974
|
+
if not checkpoints:
|
|
1975
|
+
info("No checkpoints yet. Checkpoints are created automatically before file edits.")
|
|
1976
|
+
return
|
|
1977
|
+
|
|
1978
|
+
B = Color.BOLD
|
|
1979
|
+
C = Color.CYAN
|
|
1980
|
+
G = Color.GREEN
|
|
1981
|
+
D = Color.DIM
|
|
1982
|
+
R = Color.RESET
|
|
1983
|
+
|
|
1984
|
+
print(f"\n{B}Recent Checkpoints{R}")
|
|
1985
|
+
print("=" * 60)
|
|
1986
|
+
|
|
1987
|
+
for cp in checkpoints:
|
|
1988
|
+
current = f" {G}<-- current{R}" if cp["is_current"] else ""
|
|
1989
|
+
files = [Path(f).name for f in cp["files"]]
|
|
1990
|
+
file_list = ", ".join(files[:3])
|
|
1991
|
+
if len(files) > 3:
|
|
1992
|
+
file_list += f", +{len(files) - 3} more"
|
|
1993
|
+
|
|
1994
|
+
print(f"{C}[{cp['index']}]{R} {cp['description']}{current}")
|
|
1995
|
+
print(f" {D}Tool: {cp['tool']} | Files: {file_list}{R}")
|
|
1996
|
+
print(f" {D}{cp['timestamp'][:19]}{R}")
|
|
1997
|
+
print()
|
|
1998
|
+
|
|
1999
|
+
print(f"{D}Use /rewind to undo, /forward to redo{R}\n")
|
|
2000
|
+
|
|
2001
|
+
def _toggle_autocommit(self) -> None:
|
|
2002
|
+
"""Toggle automatic git commits"""
|
|
2003
|
+
if not HAS_GIT_INTEGRATION:
|
|
2004
|
+
warning("Git integration module not available")
|
|
2005
|
+
return
|
|
2006
|
+
|
|
2007
|
+
git = get_git_integration()
|
|
2008
|
+
|
|
2009
|
+
if not git.is_repo:
|
|
2010
|
+
warning("Not in a git repository")
|
|
2011
|
+
return
|
|
2012
|
+
|
|
2013
|
+
git.auto_commit = not git.auto_commit
|
|
2014
|
+
status = "enabled" if git.auto_commit else "disabled"
|
|
2015
|
+
success(f"Auto-commit {status}")
|
|
2016
|
+
|
|
2017
|
+
def _set_autocommit(self, enabled: bool) -> None:
|
|
2018
|
+
"""Set automatic git commits on/off"""
|
|
2019
|
+
if not HAS_GIT_INTEGRATION:
|
|
2020
|
+
warning("Git integration module not available")
|
|
2021
|
+
return
|
|
2022
|
+
|
|
2023
|
+
git = get_git_integration()
|
|
2024
|
+
|
|
2025
|
+
if not git.is_repo:
|
|
2026
|
+
warning("Not in a git repository")
|
|
2027
|
+
return
|
|
2028
|
+
|
|
2029
|
+
git.auto_commit = enabled
|
|
2030
|
+
status = "enabled" if enabled else "disabled"
|
|
2031
|
+
success(f"Auto-commit {status}")
|
|
2032
|
+
|
|
2033
|
+
def _list_custom_commands(self) -> None:
|
|
2034
|
+
"""List available custom slash commands"""
|
|
2035
|
+
if not HAS_CUSTOM_COMMANDS:
|
|
2036
|
+
warning("Custom commands module not available")
|
|
2037
|
+
return
|
|
2038
|
+
|
|
2039
|
+
manager = get_custom_command_manager()
|
|
2040
|
+
commands = manager.list_commands()
|
|
2041
|
+
|
|
2042
|
+
if not commands:
|
|
2043
|
+
print("\nNo custom commands found.\n")
|
|
2044
|
+
print("Create custom commands in:")
|
|
2045
|
+
print(" ~/.nc1709/commands/*.md (personal commands)")
|
|
2046
|
+
print(" .nc1709/commands/*.md (project commands)\n")
|
|
2047
|
+
print("Example command file (fix-bug.md):")
|
|
2048
|
+
print(" # Fix a bug in the codebase")
|
|
2049
|
+
print(" ")
|
|
2050
|
+
print(" Look at the error message provided.")
|
|
2051
|
+
print(" Find the relevant code files.")
|
|
2052
|
+
print(" Analyze the root cause and implement a fix.\n")
|
|
2053
|
+
return
|
|
2054
|
+
|
|
2055
|
+
# Group by scope
|
|
2056
|
+
personal_cmds = [c for c in commands if c.scope == "personal"]
|
|
2057
|
+
project_cmds = [c for c in commands if c.scope == "project"]
|
|
2058
|
+
|
|
2059
|
+
print("\n\033[1mCustom Slash Commands\033[0m\n")
|
|
2060
|
+
|
|
2061
|
+
if personal_cmds:
|
|
2062
|
+
print("\033[36mPersonal Commands (~/.nc1709/commands/):\033[0m")
|
|
2063
|
+
for cmd in personal_cmds:
|
|
2064
|
+
print(f" \033[1m/{cmd.name:<20}\033[0m {cmd.description}")
|
|
2065
|
+
print()
|
|
2066
|
+
|
|
2067
|
+
if project_cmds:
|
|
2068
|
+
print("\033[36mProject Commands (.nc1709/commands/):\033[0m")
|
|
2069
|
+
for cmd in project_cmds:
|
|
2070
|
+
# Remove project: prefix for display
|
|
2071
|
+
display_name = cmd.name.replace("project:", "")
|
|
2072
|
+
print(f" \033[1m/{cmd.name:<20}\033[0m {cmd.description}")
|
|
2073
|
+
print()
|
|
2074
|
+
|
|
2075
|
+
print("\033[90mTip: Type / and press Tab to autocomplete custom commands\033[0m\n")
|
|
2076
|
+
|
|
2077
|
+
def _add_image(self, image_path: str) -> None:
|
|
2078
|
+
"""Add an image file for the next prompt"""
|
|
2079
|
+
if not HAS_IMAGE_INPUT:
|
|
2080
|
+
warning("Image input module not available")
|
|
2081
|
+
return
|
|
2082
|
+
|
|
2083
|
+
handler = get_image_handler()
|
|
2084
|
+
if handler.add_image(image_path):
|
|
2085
|
+
img_info = get_image_info(image_path)
|
|
2086
|
+
if img_info:
|
|
2087
|
+
dims = ""
|
|
2088
|
+
if img_info.get("width") and img_info.get("height"):
|
|
2089
|
+
dims = f" ({img_info['width']}x{img_info['height']})"
|
|
2090
|
+
success(f"Added image: {img_info['name']}{dims} - {img_info['size_human']}")
|
|
2091
|
+
else:
|
|
2092
|
+
success(f"Added image: {image_path}")
|
|
2093
|
+
print("\033[90mImage will be included in your next prompt\033[0m")
|
|
2094
|
+
else:
|
|
2095
|
+
error(f"Failed to load image: {image_path}")
|
|
2096
|
+
print("Supported formats: PNG, JPG, GIF, WebP, BMP")
|
|
2097
|
+
|
|
2098
|
+
def _capture_screenshot(self) -> None:
|
|
2099
|
+
"""Capture a screenshot for the next prompt"""
|
|
2100
|
+
if not HAS_IMAGE_INPUT:
|
|
2101
|
+
warning("Image input module not available")
|
|
2102
|
+
return
|
|
2103
|
+
|
|
2104
|
+
import platform
|
|
2105
|
+
if platform.system() != 'Darwin':
|
|
2106
|
+
warning("Screenshot capture is only available on macOS")
|
|
2107
|
+
return
|
|
2108
|
+
|
|
2109
|
+
info("Capture a screenshot... (drag to select area, ESC to cancel)")
|
|
2110
|
+
|
|
2111
|
+
handler = get_image_handler()
|
|
2112
|
+
if handler.add_screenshot():
|
|
2113
|
+
success("Screenshot captured and added")
|
|
2114
|
+
print("\033[90mScreenshot will be included in your next prompt\033[0m")
|
|
2115
|
+
else:
|
|
2116
|
+
warning("Screenshot cancelled or failed")
|
|
2117
|
+
|
|
2118
|
+
def _paste_clipboard_image(self) -> None:
|
|
2119
|
+
"""Paste image from clipboard for the next prompt"""
|
|
2120
|
+
if not HAS_IMAGE_INPUT:
|
|
2121
|
+
warning("Image input module not available")
|
|
2122
|
+
return
|
|
2123
|
+
|
|
2124
|
+
handler = get_image_handler()
|
|
2125
|
+
if handler.add_clipboard():
|
|
2126
|
+
success("Image pasted from clipboard")
|
|
2127
|
+
print("\033[90mImage will be included in your next prompt\033[0m")
|
|
2128
|
+
else:
|
|
2129
|
+
warning("No image found in clipboard or paste failed")
|
|
2130
|
+
print("Tip: Copy an image to clipboard first, or install pngpaste (brew install pngpaste)")
|
|
2131
|
+
|
|
2132
|
+
def _list_pending_images(self) -> None:
|
|
2133
|
+
"""List pending images for next prompt"""
|
|
2134
|
+
if not HAS_IMAGE_INPUT:
|
|
2135
|
+
warning("Image input module not available")
|
|
2136
|
+
return
|
|
2137
|
+
|
|
2138
|
+
handler = get_image_handler()
|
|
2139
|
+
images = handler.get_pending_images()
|
|
2140
|
+
|
|
2141
|
+
if not images:
|
|
2142
|
+
print("\nNo pending images.")
|
|
2143
|
+
print("\nAdd images with:")
|
|
2144
|
+
print(" /image <path> - Add an image file")
|
|
2145
|
+
print(" /screenshot - Capture a screenshot")
|
|
2146
|
+
print(" /paste - Paste from clipboard\n")
|
|
2147
|
+
return
|
|
2148
|
+
|
|
2149
|
+
print(f"\n\033[1mPending Images ({len(images)})\033[0m\n")
|
|
2150
|
+
|
|
2151
|
+
for i, img in enumerate(images, 1):
|
|
2152
|
+
from pathlib import Path
|
|
2153
|
+
name = Path(img.path).name
|
|
2154
|
+
dims = ""
|
|
2155
|
+
if img.width and img.height:
|
|
2156
|
+
dims = f" ({img.width}x{img.height})"
|
|
2157
|
+
size = f"{img.size_bytes / 1024:.1f} KB"
|
|
2158
|
+
print(f" {i}. {name}{dims} - {size}")
|
|
2159
|
+
|
|
2160
|
+
print(f"\n\033[90mThese images will be included in your next prompt\033[0m")
|
|
2161
|
+
print(f"\033[90mUse /clear-images to remove them\033[0m\n")
|
|
2162
|
+
|
|
2163
|
+
def _clear_pending_images(self) -> None:
|
|
2164
|
+
"""Clear pending images"""
|
|
2165
|
+
if not HAS_IMAGE_INPUT:
|
|
2166
|
+
warning("Image input module not available")
|
|
2167
|
+
return
|
|
2168
|
+
|
|
2169
|
+
handler = get_image_handler()
|
|
2170
|
+
count = len(handler.get_pending_images())
|
|
2171
|
+
handler.clear_pending()
|
|
2172
|
+
success(f"Cleared {count} pending image(s)")
|
|
2173
|
+
|
|
2174
|
+
def _enter_plan_mode(self) -> None:
|
|
2175
|
+
"""Enter plan mode"""
|
|
2176
|
+
if not HAS_PLAN_MODE:
|
|
2177
|
+
warning("Plan mode module not available")
|
|
2178
|
+
return
|
|
2179
|
+
|
|
2180
|
+
manager = get_plan_manager()
|
|
2181
|
+
manager.enter_plan_mode()
|
|
2182
|
+
|
|
2183
|
+
print("\n\033[1;36mPlan Mode Activated\033[0m\n")
|
|
2184
|
+
print("In plan mode, the AI will:")
|
|
2185
|
+
print(" 1. Analyze your request thoroughly")
|
|
2186
|
+
print(" 2. Create a step-by-step plan")
|
|
2187
|
+
print(" 3. Identify affected files and risks")
|
|
2188
|
+
print(" 4. Wait for your approval before making changes\n")
|
|
2189
|
+
print("Commands:")
|
|
2190
|
+
print(" /plan approve - Approve and execute the plan")
|
|
2191
|
+
print(" /plan reject - Reject the current plan")
|
|
2192
|
+
print(" /plan show - Show the current plan")
|
|
2193
|
+
print(" /plan exit - Exit plan mode\n")
|
|
2194
|
+
print("\033[90mDescribe what you want to accomplish...\033[0m\n")
|
|
2195
|
+
|
|
2196
|
+
def _approve_plan(self) -> None:
|
|
2197
|
+
"""Approve and execute the current plan"""
|
|
2198
|
+
if not HAS_PLAN_MODE:
|
|
2199
|
+
warning("Plan mode module not available")
|
|
2200
|
+
return
|
|
2201
|
+
|
|
2202
|
+
manager = get_plan_manager()
|
|
2203
|
+
|
|
2204
|
+
if not manager.current_plan:
|
|
2205
|
+
warning("No plan to approve. Create a plan first.")
|
|
2206
|
+
return
|
|
2207
|
+
|
|
2208
|
+
if manager.approve_plan():
|
|
2209
|
+
success("Plan approved!")
|
|
2210
|
+
manager.start_execution()
|
|
2211
|
+
|
|
2212
|
+
# Get the execution prompt and pass to AI
|
|
2213
|
+
execution_prompt = manager.get_execution_prompt()
|
|
2214
|
+
if execution_prompt:
|
|
2215
|
+
info("Executing plan...")
|
|
2216
|
+
self._process_request(execution_prompt)
|
|
2217
|
+
|
|
2218
|
+
manager.complete_plan(success=True)
|
|
2219
|
+
else:
|
|
2220
|
+
error("Failed to approve plan")
|
|
2221
|
+
|
|
2222
|
+
def _reject_plan(self) -> None:
|
|
2223
|
+
"""Reject the current plan"""
|
|
2224
|
+
if not HAS_PLAN_MODE:
|
|
2225
|
+
warning("Plan mode module not available")
|
|
2226
|
+
return
|
|
2227
|
+
|
|
2228
|
+
manager = get_plan_manager()
|
|
2229
|
+
|
|
2230
|
+
if not manager.current_plan:
|
|
2231
|
+
warning("No plan to reject")
|
|
2232
|
+
return
|
|
2233
|
+
|
|
2234
|
+
manager.reject_plan()
|
|
2235
|
+
info("Plan rejected. Describe a new approach or use /plan exit to leave plan mode.")
|
|
2236
|
+
|
|
2237
|
+
def _show_plan(self) -> None:
|
|
2238
|
+
"""Show the current plan"""
|
|
2239
|
+
if not HAS_PLAN_MODE:
|
|
2240
|
+
warning("Plan mode module not available")
|
|
2241
|
+
return
|
|
2242
|
+
|
|
2243
|
+
manager = get_plan_manager()
|
|
2244
|
+
summary = manager.get_plan_summary()
|
|
2245
|
+
|
|
2246
|
+
if summary:
|
|
2247
|
+
print(summary)
|
|
2248
|
+
else:
|
|
2249
|
+
print("\nNo current plan.")
|
|
2250
|
+
print("Describe what you want to accomplish to generate a plan.\n")
|
|
2251
|
+
|
|
2252
|
+
def _exit_plan_mode(self) -> None:
|
|
2253
|
+
"""Exit plan mode"""
|
|
2254
|
+
if not HAS_PLAN_MODE:
|
|
2255
|
+
warning("Plan mode module not available")
|
|
2256
|
+
return
|
|
2257
|
+
|
|
2258
|
+
manager = get_plan_manager()
|
|
2259
|
+
manager.exit_plan_mode()
|
|
2260
|
+
info("Exited plan mode")
|
|
2261
|
+
|
|
2262
|
+
    def _create_pr_interactive(self) -> None:
        """Create a PR interactively.

        Validates gh availability/auth and the current branch, prompts the
        user for a title and a multi-line body, pushes the branch, then
        creates the PR via the GitHub CLI wrapper. Ctrl-C cancels cleanly.
        """
        if not HAS_GITHUB:
            warning("GitHub integration module not available")
            return

        gh = get_github_integration()

        if not gh.is_available:
            warning("GitHub CLI (gh) not found. Install it with: brew install gh")
            return

        if not gh.is_authenticated:
            warning("Not authenticated with GitHub. Run: gh auth login")
            return

        branch = gh.get_current_branch()
        if not branch:
            error("Not in a git repository or no branch found")
            return

        # Refuse to open a PR from the default branch
        if branch in ["main", "master"]:
            warning(f"You're on the {branch} branch. Create a feature branch first.")
            return

        print(f"\n\033[1mCreate Pull Request\033[0m")
        print(f"Branch: {branch}\n")

        # Prompt for PR details
        try:
            title = input("\033[36mPR Title: \033[0m").strip()
            if not title:
                warning("PR title is required")
                return

            # Body is read line-by-line until the first empty line
            print("\033[36mPR Description (end with empty line):\033[0m")
            body_lines = []
            while True:
                line = input()
                if not line:
                    break
                body_lines.append(line)
            body = "\n".join(body_lines)

            # Push branch if needed; a failed push is non-fatal because the
            # branch may already be on the remote
            info("Pushing branch to remote...")
            if not gh.push_branch():
                warning("Failed to push branch. You may need to push manually.")

            info("Creating PR...")
            pr = gh.create_pr(title=title, body=body)

            if pr:
                success(f"PR created: #{pr.number}")
                print(f"URL: {pr.url}")
            else:
                error("Failed to create PR")

        except KeyboardInterrupt:
            # User hit Ctrl-C at a prompt — abort without a traceback
            print()
            warning("Cancelled")
def _list_prs(self) -> None:
|
|
2325
|
+
"""List open PRs"""
|
|
2326
|
+
if not HAS_GITHUB:
|
|
2327
|
+
warning("GitHub integration module not available")
|
|
2328
|
+
return
|
|
2329
|
+
|
|
2330
|
+
gh = get_github_integration()
|
|
2331
|
+
|
|
2332
|
+
if not gh.is_available:
|
|
2333
|
+
warning("GitHub CLI (gh) not found")
|
|
2334
|
+
return
|
|
2335
|
+
|
|
2336
|
+
prs = gh.list_prs(state="open")
|
|
2337
|
+
|
|
2338
|
+
if not prs:
|
|
2339
|
+
print("\nNo open pull requests.\n")
|
|
2340
|
+
return
|
|
2341
|
+
|
|
2342
|
+
print(f"\n\033[1mOpen Pull Requests ({len(prs)})\033[0m\n")
|
|
2343
|
+
|
|
2344
|
+
for pr in prs:
|
|
2345
|
+
print(format_pr_summary(pr))
|
|
2346
|
+
print()
|
|
2347
|
+
|
|
2348
|
+
def _view_pr(self, pr_num: str) -> None:
|
|
2349
|
+
"""View a specific PR"""
|
|
2350
|
+
if not HAS_GITHUB:
|
|
2351
|
+
warning("GitHub integration module not available")
|
|
2352
|
+
return
|
|
2353
|
+
|
|
2354
|
+
gh = get_github_integration()
|
|
2355
|
+
|
|
2356
|
+
if not gh.is_available:
|
|
2357
|
+
warning("GitHub CLI (gh) not found")
|
|
2358
|
+
return
|
|
2359
|
+
|
|
2360
|
+
try:
|
|
2361
|
+
number = int(pr_num)
|
|
2362
|
+
except ValueError:
|
|
2363
|
+
error(f"Invalid PR number: {pr_num}")
|
|
2364
|
+
return
|
|
2365
|
+
|
|
2366
|
+
pr = gh.get_pr(number)
|
|
2367
|
+
|
|
2368
|
+
if pr:
|
|
2369
|
+
print()
|
|
2370
|
+
print(format_pr_summary(pr))
|
|
2371
|
+
if pr.body:
|
|
2372
|
+
print(f"\n\033[90m{pr.body[:500]}{'...' if len(pr.body) > 500 else ''}\033[0m")
|
|
2373
|
+
print()
|
|
2374
|
+
else:
|
|
2375
|
+
error(f"PR #{number} not found")
|
|
2376
|
+
|
|
2377
|
+
def _list_issues(self) -> None:
|
|
2378
|
+
"""List open issues"""
|
|
2379
|
+
if not HAS_GITHUB:
|
|
2380
|
+
warning("GitHub integration module not available")
|
|
2381
|
+
return
|
|
2382
|
+
|
|
2383
|
+
gh = get_github_integration()
|
|
2384
|
+
|
|
2385
|
+
if not gh.is_available:
|
|
2386
|
+
warning("GitHub CLI (gh) not found")
|
|
2387
|
+
return
|
|
2388
|
+
|
|
2389
|
+
issues = gh.list_issues(state="open")
|
|
2390
|
+
|
|
2391
|
+
if not issues:
|
|
2392
|
+
print("\nNo open issues.\n")
|
|
2393
|
+
return
|
|
2394
|
+
|
|
2395
|
+
print(f"\n\033[1mOpen Issues ({len(issues)})\033[0m\n")
|
|
2396
|
+
|
|
2397
|
+
for issue in issues:
|
|
2398
|
+
print(format_issue_summary(issue))
|
|
2399
|
+
print()
|
|
2400
|
+
|
|
2401
|
+
def _run_gh_command(self, args: str) -> None:
|
|
2402
|
+
"""Run a gh CLI command"""
|
|
2403
|
+
if not HAS_GITHUB:
|
|
2404
|
+
warning("GitHub integration module not available")
|
|
2405
|
+
return
|
|
2406
|
+
|
|
2407
|
+
import subprocess
|
|
2408
|
+
try:
|
|
2409
|
+
result = subprocess.run(
|
|
2410
|
+
["gh"] + args.split(),
|
|
2411
|
+
capture_output=False,
|
|
2412
|
+
text=True,
|
|
2413
|
+
timeout=60
|
|
2414
|
+
)
|
|
2415
|
+
except subprocess.TimeoutExpired:
|
|
2416
|
+
error("Command timed out")
|
|
2417
|
+
except FileNotFoundError:
|
|
2418
|
+
warning("GitHub CLI (gh) not found. Install it with: brew install gh")
|
|
2419
|
+
except Exception as e:
|
|
2420
|
+
error(f"Error running gh command: {e}")
|
|
2421
|
+
|
|
2422
|
+
# ==================== Linting Commands ====================
|
|
2423
|
+
|
|
2424
|
+
def _run_lint(self, target: str = None, fix: bool = False) -> None:
|
|
2425
|
+
"""Run linter on project or specific target"""
|
|
2426
|
+
if not HAS_LINTING:
|
|
2427
|
+
warning("Linting module not available")
|
|
2428
|
+
return
|
|
2429
|
+
|
|
2430
|
+
manager = get_linting_manager()
|
|
2431
|
+
|
|
2432
|
+
if not manager.available_linters:
|
|
2433
|
+
warning("No linters detected. Install ruff, eslint, or other linters.")
|
|
2434
|
+
return
|
|
2435
|
+
|
|
2436
|
+
if target:
|
|
2437
|
+
# Lint specific file
|
|
2438
|
+
linter = manager.get_linter_for_file(target)
|
|
2439
|
+
if not linter:
|
|
2440
|
+
warning(f"No suitable linter found for: {target}")
|
|
2441
|
+
return
|
|
2442
|
+
|
|
2443
|
+
info(f"Running {linter} on {target}...")
|
|
2444
|
+
result = manager.run_linter(linter, target, fix=fix)
|
|
2445
|
+
print(format_lint_result(result))
|
|
2446
|
+
else:
|
|
2447
|
+
# Lint entire project
|
|
2448
|
+
info(f"Running linters on project ({', '.join(manager.available_linters)})...")
|
|
2449
|
+
results = manager.lint_project(fix=fix)
|
|
2450
|
+
|
|
2451
|
+
total_errors = 0
|
|
2452
|
+
total_warnings = 0
|
|
2453
|
+
|
|
2454
|
+
for linter_name, result in results.items():
|
|
2455
|
+
print(format_lint_result(result))
|
|
2456
|
+
total_errors += result.error_count
|
|
2457
|
+
total_warnings += result.warning_count
|
|
2458
|
+
print()
|
|
2459
|
+
|
|
2460
|
+
if total_errors == 0 and total_warnings == 0:
|
|
2461
|
+
success("All linters passed with no issues!")
|
|
2462
|
+
else:
|
|
2463
|
+
info(f"Total: {total_errors} errors, {total_warnings} warnings")
|
|
2464
|
+
|
|
2465
|
+
def _lint_file(self, file_path: str) -> None:
|
|
2466
|
+
"""Lint a specific file"""
|
|
2467
|
+
self._run_lint(target=file_path)
|
|
2468
|
+
|
|
2469
|
+
def _lint_fix(self, target: str = None) -> None:
|
|
2470
|
+
"""Run linter with auto-fix enabled"""
|
|
2471
|
+
self._run_lint(target=target, fix=True)
|
|
2472
|
+
|
|
2473
|
+
def _list_linters(self) -> None:
|
|
2474
|
+
"""List available linters"""
|
|
2475
|
+
if not HAS_LINTING:
|
|
2476
|
+
warning("Linting module not available")
|
|
2477
|
+
return
|
|
2478
|
+
|
|
2479
|
+
manager = get_linting_manager()
|
|
2480
|
+
|
|
2481
|
+
print("\n\033[1mAvailable Linters\033[0m\n")
|
|
2482
|
+
|
|
2483
|
+
if not manager.available_linters:
|
|
2484
|
+
warning("No linters detected.")
|
|
2485
|
+
print("\nSupported linters (install any):")
|
|
2486
|
+
print(" • ruff - Fast Python linter")
|
|
2487
|
+
print(" • flake8 - Python style checker")
|
|
2488
|
+
print(" • pylint - Python code analyzer")
|
|
2489
|
+
print(" • mypy - Python type checker")
|
|
2490
|
+
print(" • eslint - JavaScript/TypeScript linter")
|
|
2491
|
+
print(" • tsc - TypeScript compiler")
|
|
2492
|
+
print(" • golangci-lint - Go linter")
|
|
2493
|
+
print(" • cargo clippy - Rust linter")
|
|
2494
|
+
return
|
|
2495
|
+
|
|
2496
|
+
for linter in manager.available_linters:
|
|
2497
|
+
from .linting import LINTERS
|
|
2498
|
+
config = LINTERS.get(linter)
|
|
2499
|
+
if config:
|
|
2500
|
+
patterns = ", ".join(config.file_patterns)
|
|
2501
|
+
print(f" \033[32m✓\033[0m \033[1m{linter}\033[0m ({patterns})")
|
|
2502
|
+
|
|
2503
|
+
print(f"\nTotal: {len(manager.available_linters)} linters available")
|
|
2504
|
+
|
|
2505
|
+
def _handle_config_command(self, prompt: str) -> None:
|
|
2506
|
+
"""Handle config commands
|
|
2507
|
+
|
|
2508
|
+
Args:
|
|
2509
|
+
prompt: Config command
|
|
2510
|
+
"""
|
|
2511
|
+
parts = prompt.split(maxsplit=2)
|
|
2512
|
+
|
|
2513
|
+
if len(parts) < 2:
|
|
2514
|
+
self._show_config()
|
|
2515
|
+
return
|
|
2516
|
+
|
|
2517
|
+
if parts[1] == "get":
|
|
2518
|
+
if len(parts) < 3:
|
|
2519
|
+
print("Usage: config get <key>")
|
|
2520
|
+
return
|
|
2521
|
+
key = parts[2]
|
|
2522
|
+
value = self.config.get(key)
|
|
2523
|
+
print(f"{key} = {value}")
|
|
2524
|
+
|
|
2525
|
+
elif parts[1] == "set":
|
|
2526
|
+
if len(parts) < 3:
|
|
2527
|
+
print("Usage: config set <key> <value>")
|
|
2528
|
+
return
|
|
2529
|
+
# Parse key=value
|
|
2530
|
+
if "=" not in parts[2]:
|
|
2531
|
+
print("Usage: config set <key>=<value>")
|
|
2532
|
+
return
|
|
2533
|
+
key, value = parts[2].split("=", 1)
|
|
2534
|
+
self.config.set(key.strip(), value.strip())
|
|
2535
|
+
print(f"✅ Set {key} = {value}")
|
|
2536
|
+
|
|
2537
|
+
else:
|
|
2538
|
+
print("Unknown config command. Use 'config get <key>' or 'config set <key>=<value>'")
|
|
2539
|
+
|
|
2540
|
+
def _list_sessions(self) -> int:
|
|
2541
|
+
"""List saved sessions
|
|
2542
|
+
|
|
2543
|
+
Returns:
|
|
2544
|
+
Exit code
|
|
2545
|
+
"""
|
|
2546
|
+
if not self.session_manager:
|
|
2547
|
+
print("⚠️ Session management not available")
|
|
2548
|
+
return 1
|
|
2549
|
+
|
|
2550
|
+
sessions = self.session_manager.list_sessions(limit=20)
|
|
2551
|
+
|
|
2552
|
+
if not sessions:
|
|
2553
|
+
print("No saved sessions found.")
|
|
2554
|
+
return 0
|
|
2555
|
+
|
|
2556
|
+
print("\n📚 Saved Sessions:")
|
|
2557
|
+
print("=" * 70)
|
|
2558
|
+
|
|
2559
|
+
for session in sessions:
|
|
2560
|
+
msg_count = session.get("message_count", 0)
|
|
2561
|
+
project = session.get("project_path", "N/A")
|
|
2562
|
+
if project and len(project) > 30:
|
|
2563
|
+
project = "..." + project[-27:]
|
|
2564
|
+
|
|
2565
|
+
print(f" ID: {session['id']}")
|
|
2566
|
+
print(f" Name: {session['name']}")
|
|
2567
|
+
print(f" Messages: {msg_count}")
|
|
2568
|
+
print(f" Updated: {session.get('updated_at', 'N/A')[:19]}")
|
|
2569
|
+
print(f" Project: {project}")
|
|
2570
|
+
print("-" * 70)
|
|
2571
|
+
|
|
2572
|
+
print(f"\nTo resume a session: nc1709 --resume <session_id>")
|
|
2573
|
+
return 0
|
|
2574
|
+
|
|
2575
|
+
def _index_project(self) -> int:
|
|
2576
|
+
"""Index the current project for semantic search
|
|
2577
|
+
|
|
2578
|
+
Returns:
|
|
2579
|
+
Exit code
|
|
2580
|
+
"""
|
|
2581
|
+
try:
|
|
2582
|
+
from .memory.indexer import ProjectIndexer
|
|
2583
|
+
|
|
2584
|
+
print(f"🔍 Indexing project: {Path.cwd()}")
|
|
2585
|
+
print("This may take a few minutes for large projects...\n")
|
|
2586
|
+
|
|
2587
|
+
indexer = ProjectIndexer(str(Path.cwd()))
|
|
2588
|
+
stats = indexer.index_project(show_progress=True)
|
|
2589
|
+
|
|
2590
|
+
print(f"\n✅ Indexing complete!")
|
|
2591
|
+
print(f" Files indexed: {stats['files_indexed']}")
|
|
2592
|
+
print(f" Total chunks: {stats['chunks_created']}")
|
|
2593
|
+
|
|
2594
|
+
if stats['errors']:
|
|
2595
|
+
print(f" Errors: {len(stats['errors'])}")
|
|
2596
|
+
|
|
2597
|
+
return 0
|
|
2598
|
+
|
|
2599
|
+
except ImportError:
|
|
2600
|
+
print("⚠️ Memory module dependencies not installed.")
|
|
2601
|
+
print(" Install with: pip install chromadb sentence-transformers")
|
|
2602
|
+
return 1
|
|
2603
|
+
except Exception as e:
|
|
2604
|
+
print(f"❌ Error indexing project: {e}")
|
|
2605
|
+
return 1
|
|
2606
|
+
|
|
2607
|
+
def _search_code(self, query: str) -> int:
|
|
2608
|
+
"""Search indexed code
|
|
2609
|
+
|
|
2610
|
+
Args:
|
|
2611
|
+
query: Search query
|
|
2612
|
+
|
|
2613
|
+
Returns:
|
|
2614
|
+
Exit code
|
|
2615
|
+
"""
|
|
2616
|
+
if not query:
|
|
2617
|
+
print("Usage: search <query>")
|
|
2618
|
+
return 1
|
|
2619
|
+
|
|
2620
|
+
try:
|
|
2621
|
+
from .memory.indexer import ProjectIndexer
|
|
2622
|
+
|
|
2623
|
+
indexer = ProjectIndexer(str(Path.cwd()))
|
|
2624
|
+
|
|
2625
|
+
# Check if project is indexed
|
|
2626
|
+
summary = indexer.get_project_summary()
|
|
2627
|
+
if summary['total_files'] == 0:
|
|
2628
|
+
print("⚠️ Project not indexed yet. Run 'index' first.")
|
|
2629
|
+
return 1
|
|
2630
|
+
|
|
2631
|
+
print(f"\n🔍 Searching for: {query}\n")
|
|
2632
|
+
|
|
2633
|
+
results = indexer.search(query, n_results=5)
|
|
2634
|
+
|
|
2635
|
+
if not results:
|
|
2636
|
+
print("No results found.")
|
|
2637
|
+
return 0
|
|
2638
|
+
|
|
2639
|
+
print(f"Found {len(results)} results:\n")
|
|
2640
|
+
print("=" * 70)
|
|
2641
|
+
|
|
2642
|
+
for i, result in enumerate(results, 1):
|
|
2643
|
+
similarity = result.get('similarity', 0) * 100
|
|
2644
|
+
location = result.get('location', 'Unknown')
|
|
2645
|
+
language = result.get('language', 'unknown')
|
|
2646
|
+
|
|
2647
|
+
print(f"\n📄 Result {i} ({similarity:.1f}% match)")
|
|
2648
|
+
print(f" Location: {location}")
|
|
2649
|
+
print(f" Language: {language}")
|
|
2650
|
+
print("-" * 70)
|
|
2651
|
+
|
|
2652
|
+
# Show code preview (first 10 lines)
|
|
2653
|
+
code = result.get('content', '')
|
|
2654
|
+
lines = code.split('\n')[:10]
|
|
2655
|
+
for line in lines:
|
|
2656
|
+
print(f" {line[:80]}")
|
|
2657
|
+
if len(code.split('\n')) > 10:
|
|
2658
|
+
print(" ...")
|
|
2659
|
+
|
|
2660
|
+
print("\n" + "=" * 70)
|
|
2661
|
+
return 0
|
|
2662
|
+
|
|
2663
|
+
except ImportError:
|
|
2664
|
+
print("⚠️ Memory module dependencies not installed.")
|
|
2665
|
+
print(" Install with: pip install chromadb sentence-transformers")
|
|
2666
|
+
return 1
|
|
2667
|
+
except Exception as e:
|
|
2668
|
+
print(f"❌ Error searching: {e}")
|
|
2669
|
+
return 1
|
|
2670
|
+
|
|
2671
|
+
def _list_plugins(self) -> int:
|
|
2672
|
+
"""List available plugins
|
|
2673
|
+
|
|
2674
|
+
Returns:
|
|
2675
|
+
Exit code
|
|
2676
|
+
"""
|
|
2677
|
+
if not self.plugin_manager:
|
|
2678
|
+
print("⚠️ Plugin system not available")
|
|
2679
|
+
return 1
|
|
2680
|
+
|
|
2681
|
+
status = self.plugin_manager.get_status()
|
|
2682
|
+
|
|
2683
|
+
if not status:
|
|
2684
|
+
print("No plugins registered.")
|
|
2685
|
+
return 0
|
|
2686
|
+
|
|
2687
|
+
print("\n🔌 Available Plugins:")
|
|
2688
|
+
print("=" * 70)
|
|
2689
|
+
|
|
2690
|
+
for name, info in status.items():
|
|
2691
|
+
status_icon = "✅" if info["status"] == "active" else "❌"
|
|
2692
|
+
builtin_tag = " [built-in]" if info.get("builtin") else ""
|
|
2693
|
+
|
|
2694
|
+
print(f"\n {status_icon} {name} v{info['version']}{builtin_tag}")
|
|
2695
|
+
print(f" Status: {info['status']}")
|
|
2696
|
+
|
|
2697
|
+
if info.get("error"):
|
|
2698
|
+
print(f" Error: {info['error']}")
|
|
2699
|
+
|
|
2700
|
+
# Show actions for loaded plugins
|
|
2701
|
+
plugin = self.plugin_manager.get_plugin(name)
|
|
2702
|
+
if plugin and plugin.actions:
|
|
2703
|
+
actions = ", ".join(plugin.actions.keys())
|
|
2704
|
+
print(f" Actions: {actions}")
|
|
2705
|
+
|
|
2706
|
+
print("\n" + "=" * 70)
|
|
2707
|
+
print("\nUsage: nc1709 --plugin <name>:<action>")
|
|
2708
|
+
print("Example: nc1709 --plugin git:status")
|
|
2709
|
+
return 0
|
|
2710
|
+
|
|
2711
|
+
    def _run_plugin_action(self, action_spec: str) -> int:
        """Run a plugin action.

        Resolution order:
          1. No ':' in the spec -> treat the whole spec as a natural-language
             request and ask the plugin manager for a handler.
          2. '<plugin>:' with no action -> print the plugin's help.
          3. '<plugin>:<action>' where the action is unknown -> fall back to
             the plugin's handle_request, then report the available actions.
          4. Otherwise execute the named action.

        Args:
            action_spec: Plugin:action specification (e.g., "git:status")

        Returns:
            Exit code (0 on success, 1 on any failure).
        """
        if not self.plugin_manager:
            print("⚠️ Plugin system not available")
            return 1

        # Parse action spec
        if ":" not in action_spec:
            # Try to find a plugin that can handle this as a request
            handlers = self.plugin_manager.find_handler(action_spec)
            if handlers:
                # handlers is a list of (plugin_name, ...) pairs; take the first.
                plugin_name = handlers[0][0]
                plugin = self.plugin_manager.get_plugin(plugin_name)
                if plugin and hasattr(plugin, 'handle_request'):
                    result = plugin.handle_request(action_spec)
                    if result:
                        self._print_action_result(result)
                        return 0 if result.success else 1

            print(f"❌ Invalid format. Use: <plugin>:<action>")
            print(f" Example: git:status, docker:ps")
            return 1

        parts = action_spec.split(":", 1)
        plugin_name = parts[0].strip()
        action_name = parts[1].strip() if len(parts) > 1 else ""

        # Get the plugin
        plugin = self.plugin_manager.get_plugin(plugin_name)
        if not plugin:
            # Try to load it lazily before giving up.
            if not self.plugin_manager.load_plugin(plugin_name):
                print(f"❌ Plugin '{plugin_name}' not found")
                return 1
            plugin = self.plugin_manager.get_plugin(plugin_name)

        # If no action specified, show plugin help
        if not action_name:
            print(plugin.get_help())
            return 0

        # Check if action exists
        if action_name not in plugin.actions:
            # Try to handle as natural language
            if hasattr(plugin, 'handle_request'):
                result = plugin.handle_request(action_name)
                if result:
                    self._print_action_result(result)
                    return 0 if result.success else 1

            print(f"❌ Unknown action: {action_name}")
            print(f" Available actions: {', '.join(plugin.actions.keys())}")
            return 1

        # Execute the action
        result = self.plugin_manager.execute_action(plugin_name, action_name)
        self._print_action_result(result)
        return 0 if result.success else 1
|
|
2776
|
+
|
|
2777
|
+
def _print_action_result(self, result) -> None:
|
|
2778
|
+
"""Print an action result
|
|
2779
|
+
|
|
2780
|
+
Args:
|
|
2781
|
+
result: ActionResult to print
|
|
2782
|
+
"""
|
|
2783
|
+
if result.success:
|
|
2784
|
+
print(f"\n✅ {result.message}")
|
|
2785
|
+
else:
|
|
2786
|
+
print(f"\n❌ {result.message}")
|
|
2787
|
+
if result.error:
|
|
2788
|
+
print(f" Error: {result.error}")
|
|
2789
|
+
|
|
2790
|
+
# Print data if present
|
|
2791
|
+
if result.data:
|
|
2792
|
+
if isinstance(result.data, str):
|
|
2793
|
+
print(f"\n{result.data}")
|
|
2794
|
+
elif isinstance(result.data, list):
|
|
2795
|
+
for item in result.data:
|
|
2796
|
+
if hasattr(item, '__dict__'):
|
|
2797
|
+
# Dataclass or object
|
|
2798
|
+
print(f" - {item}")
|
|
2799
|
+
else:
|
|
2800
|
+
print(f" - {item}")
|
|
2801
|
+
elif hasattr(result.data, '__dict__'):
|
|
2802
|
+
# Single object
|
|
2803
|
+
for key, value in vars(result.data).items():
|
|
2804
|
+
if not key.startswith('_'):
|
|
2805
|
+
print(f" {key}: {value}")
|
|
2806
|
+
|
|
2807
|
+
# =========================================================================
|
|
2808
|
+
# MCP Methods
|
|
2809
|
+
# =========================================================================
|
|
2810
|
+
|
|
2811
|
+
    def _mcp_show_status(self) -> int:
        """Print a status report for the MCP subsystem.

        Shows the local MCP server identity and its tool/resource/prompt
        counts, then each connected remote server, then a short command
        cheat-sheet.

        Returns:
            Exit code: 0 on success, 1 when the MCP module is missing.
        """
        if not self.mcp_manager:
            print("⚠️ MCP module not available")
            return 1

        # get_status() returns a dict with "server" and "client" sections.
        status = self.mcp_manager.get_status()

        print("\n🔌 MCP Status:")
        print("=" * 60)

        # Server info
        server = status["server"]
        print(f"\n📡 Local Server: {server['name']} v{server['version']}")
        print(f" Running: {'Yes' if server['running'] else 'No'}")
        print(f" Tools: {server['tools']}")
        print(f" Resources: {server['resources']}")
        print(f" Prompts: {server['prompts']}")

        # Connected servers
        client = status["client"]
        print(f"\n🌐 Connected Servers: {client['connected_servers']}")

        if client["servers"]:
            for srv in client["servers"]:
                status_icon = "✅" if srv["connected"] else "❌"
                print(f" {status_icon} {srv['name']}: {srv['tools']} tools, {srv['resources']} resources")

        print("\n" + "=" * 60)
        print("\nCommands:")
        print(" mcp tools - List available tools")
        print(" mcp call <t> - Call a tool")
        print(" --mcp-serve - Run as MCP server")
        return 0
|
|
2849
|
+
|
|
2850
|
+
    def _mcp_list_tools(self) -> int:
        """List every MCP tool known to the manager.

        Tools are grouped into "local" (served by this process) and
        "remote" (provided by connected MCP servers).

        Returns:
            Exit code: 0 on success, 1 when the MCP module is missing.
        """
        if not self.mcp_manager:
            print("⚠️ MCP module not available")
            return 1

        # Dict with "local" and "remote" lists of tool objects.
        all_tools = self.mcp_manager.get_all_tools()

        print("\n🔧 Available MCP Tools:")
        print("=" * 60)

        # Local tools
        if all_tools["local"]:
            print("\n📍 Local Tools:")
            for tool in all_tools["local"]:
                print(f"\n {tool.name}")
                print(f" Description: {tool.description}")
                if tool.parameters:
                    # Parameters are objects exposing a .name attribute.
                    params = ", ".join(p.name for p in tool.parameters)
                    print(f" Parameters: {params}")

        # Remote tools (parameters are not listed for these)
        if all_tools["remote"]:
            print("\n🌐 Remote Tools:")
            for tool in all_tools["remote"]:
                print(f"\n {tool.name}")
                print(f" Description: {tool.description}")

        if not all_tools["local"] and not all_tools["remote"]:
            print("\nNo tools available.")

        print("\n" + "=" * 60)
        return 0
|
|
2887
|
+
|
|
2888
|
+
def _mcp_call_tool_interactive(self, tool_spec: str) -> int:
|
|
2889
|
+
"""Call an MCP tool from interactive mode
|
|
2890
|
+
|
|
2891
|
+
Args:
|
|
2892
|
+
tool_spec: Tool name and args (e.g., "read_file path=main.py")
|
|
2893
|
+
|
|
2894
|
+
Returns:
|
|
2895
|
+
Exit code
|
|
2896
|
+
"""
|
|
2897
|
+
import json
|
|
2898
|
+
|
|
2899
|
+
parts = tool_spec.split(maxsplit=1)
|
|
2900
|
+
tool_name = parts[0]
|
|
2901
|
+
args_str = parts[1] if len(parts) > 1 else ""
|
|
2902
|
+
|
|
2903
|
+
# Parse key=value args
|
|
2904
|
+
args = {}
|
|
2905
|
+
if args_str:
|
|
2906
|
+
for pair in args_str.split():
|
|
2907
|
+
if "=" in pair:
|
|
2908
|
+
key, value = pair.split("=", 1)
|
|
2909
|
+
# Try to parse as JSON for complex values
|
|
2910
|
+
try:
|
|
2911
|
+
args[key] = json.loads(value)
|
|
2912
|
+
except json.JSONDecodeError:
|
|
2913
|
+
args[key] = value
|
|
2914
|
+
|
|
2915
|
+
return self._mcp_call_tool(tool_name, json.dumps(args))
|
|
2916
|
+
|
|
2917
|
+
    def _mcp_call_tool(self, tool_name: str, args_json: str) -> int:
        """Call an MCP tool and print its result.

        Args:
            tool_name: Tool name (local or on a connected server).
            args_json: JSON object string of keyword arguments for the tool.

        Returns:
            Exit code: 0 on success, 1 on bad JSON, a tool error, or an
            unexpected exception.
        """
        import json
        import asyncio

        if not self.mcp_manager:
            print("⚠️ MCP module not available")
            return 1

        try:
            args = json.loads(args_json)
        except json.JSONDecodeError as e:
            print(f"❌ Invalid JSON arguments: {e}")
            return 1

        print(f"\n🔧 Calling tool: {tool_name}")
        if args:
            print(f" Arguments: {args}")

        try:
            # call_tool is a coroutine; drive it to completion here.
            result = asyncio.run(self.mcp_manager.call_tool(tool_name, args))

            # An "error" key signals a tool-level failure.
            if "error" in result:
                print(f"\n❌ Error: {result['error']}")
                return 1

            print("\n✅ Result:")
            print("-" * 40)

            if "content" in result:
                # MCP results carry a list of typed content items; only
                # text items are printed here.
                for item in result["content"]:
                    if item.get("type") == "text":
                        print(item.get("text", ""))
            else:
                # Unknown shape: dump the raw result as pretty JSON.
                print(json.dumps(result, indent=2))

            return 0

        except Exception as e:
            print(f"❌ Error calling tool: {e}")
            return 1
|
|
2967
|
+
|
|
2968
|
+
def _mcp_run_server(self) -> int:
|
|
2969
|
+
"""Run NC1709 as an MCP server
|
|
2970
|
+
|
|
2971
|
+
Returns:
|
|
2972
|
+
Exit code (never returns normally)
|
|
2973
|
+
"""
|
|
2974
|
+
import asyncio
|
|
2975
|
+
|
|
2976
|
+
if not self.mcp_manager:
|
|
2977
|
+
print("⚠️ MCP module not available", file=sys.stderr)
|
|
2978
|
+
return 1
|
|
2979
|
+
|
|
2980
|
+
# Don't print to stdout as it's used for MCP communication
|
|
2981
|
+
print("Starting NC1709 MCP server (stdio)...", file=sys.stderr)
|
|
2982
|
+
|
|
2983
|
+
try:
|
|
2984
|
+
asyncio.run(self.mcp_manager.server.run_stdio())
|
|
2985
|
+
except KeyboardInterrupt:
|
|
2986
|
+
print("\nServer stopped.", file=sys.stderr)
|
|
2987
|
+
|
|
2988
|
+
return 0
|
|
2989
|
+
|
|
2990
|
+
def _mcp_connect_servers(self, config_path: str) -> int:
|
|
2991
|
+
"""Connect to MCP servers from config
|
|
2992
|
+
|
|
2993
|
+
Args:
|
|
2994
|
+
config_path: Path to MCP config file
|
|
2995
|
+
|
|
2996
|
+
Returns:
|
|
2997
|
+
Exit code
|
|
2998
|
+
"""
|
|
2999
|
+
import asyncio
|
|
3000
|
+
|
|
3001
|
+
if not self.mcp_manager:
|
|
3002
|
+
print("⚠️ MCP module not available")
|
|
3003
|
+
return 1
|
|
3004
|
+
|
|
3005
|
+
print(f"🔌 Connecting to MCP servers from: {config_path}")
|
|
3006
|
+
|
|
3007
|
+
try:
|
|
3008
|
+
count = asyncio.run(self.mcp_manager.auto_discover_servers(config_path))
|
|
3009
|
+
|
|
3010
|
+
if count > 0:
|
|
3011
|
+
print(f"\n✅ Connected to {count} server(s)")
|
|
3012
|
+
|
|
3013
|
+
# Show connected servers
|
|
3014
|
+
servers = self.mcp_manager.list_connected_servers()
|
|
3015
|
+
for srv in servers:
|
|
3016
|
+
print(f" - {srv['name']}: {srv['tools']} tools")
|
|
3017
|
+
|
|
3018
|
+
return 0
|
|
3019
|
+
else:
|
|
3020
|
+
print("\n⚠️ No servers connected. Check config file.")
|
|
3021
|
+
return 1
|
|
3022
|
+
|
|
3023
|
+
except Exception as e:
|
|
3024
|
+
print(f"❌ Error connecting: {e}")
|
|
3025
|
+
return 1
|
|
3026
|
+
|
|
3027
|
+
# =========================================================================
|
|
3028
|
+
# Cognitive System Methods (Brain)
|
|
3029
|
+
# =========================================================================
|
|
3030
|
+
|
|
3031
|
+
    def _brain_show_status(self) -> None:
        """Print a status report for the cognitive system.

        Covers uptime, request/council statistics, per-layer active flags,
        and (when present) the project-index statistics.
        """
        if not HAS_COGNITIVE:
            warning("Cognitive module not available")
            return

        if not self.cognitive_system:
            warning("Cognitive system not initialized")
            return

        stats = self.cognitive_system.get_system_stats()

        print("\n🧠 NC1709 Cognitive System Status:")
        print("=" * 60)

        # Uptime, rendered as whole hours and minutes
        uptime = stats.get("uptime_seconds", 0)
        hours = int(uptime // 3600)
        minutes = int((uptime % 3600) // 60)
        print(f"\n⏱️ Uptime: {hours}h {minutes}m")

        # Request stats
        print(f"\n📊 Statistics:")
        print(f" Total requests: {stats.get('total_requests', 0)}")
        print(f" Council convenes: {stats.get('council_convenes', 0)}")
        # council_rate is a fraction; show it as a percentage.
        council_rate = stats.get('council_rate', 0) * 100
        print(f" Council rate: {council_rate:.1f}%")

        # Layer status: map internal layer keys to display names.
        print(f"\n🔧 Layer Status:")
        layers = stats.get("layers_active", {})
        layer_names = {
            "router": "Layer 1: Intelligent Router",
            "context_engine": "Layer 2: Deep Context Engine",
            "council": "Layer 3: Multi-Agent Council",
            "learning": "Layer 4: Learning Core",
            "anticipation": "Layer 5: Anticipation Engine",
        }
        for key, name in layer_names.items():
            status = "✅ Active" if layers.get(key) else "⚪ Inactive"
            print(f" {status} {name}")

        # Project stats if available
        if "project" in stats:
            project = stats["project"]
            print(f"\n📁 Project Index:")
            print(f" Files indexed: {project.get('files_indexed', 0)}")
            print(f" Total lines: {project.get('total_lines', 0):,}")
            print(f" Patterns detected: {project.get('patterns_detected', 0)}")
|
|
3080
|
+
|
|
3081
|
+
    def _brain_show_suggestions(self) -> None:
        """Show up to five proactive suggestions from the anticipation engine.

        Each suggestion is printed with a confidence-tiered icon, its
        description and type, and — when present — the concrete action
        (file to open or command to run) it proposes.
        """
        if not HAS_COGNITIVE:
            warning("Cognitive module not available")
            return

        if not self.cognitive_system:
            warning("Cognitive system not initialized")
            return

        suggestions = self.cognitive_system.get_suggestions(limit=5)

        if not suggestions:
            info("No suggestions available yet. Keep using NC1709 to build context!")
            return

        print("\n💡 Proactive Suggestions:")
        print("=" * 60)

        for i, suggestion in enumerate(suggestions, 1):
            confidence = suggestion.get('confidence', 0)
            # Icon tiers: >0.8 hot, >0.5 likely, otherwise a plain idea.
            icon = "🔥" if confidence > 0.8 else "💭" if confidence > 0.5 else "💡"

            print(f"\n{i}. {icon} {suggestion['title']}")
            if suggestion.get('description'):
                print(f" {suggestion['description']}")
            print(f" Type: {suggestion['type']} | Confidence: {confidence:.0%}")

            # Only two action types have a dedicated rendering.
            if suggestion.get('action'):
                action = suggestion['action']
                if action.get('type') == 'open_file':
                    print(f" 📂 Open: {action.get('target')}")
                elif action.get('type') == 'suggest_command':
                    print(f" ⌨️ Command: {action.get('target')}")
|
|
3115
|
+
|
|
3116
|
+
def _brain_index_project(self) -> None:
|
|
3117
|
+
"""Index the project for context awareness"""
|
|
3118
|
+
if not HAS_COGNITIVE:
|
|
3119
|
+
warning("Cognitive module not available")
|
|
3120
|
+
return
|
|
3121
|
+
|
|
3122
|
+
if not self.cognitive_system:
|
|
3123
|
+
warning("Cognitive system not initialized")
|
|
3124
|
+
return
|
|
3125
|
+
|
|
3126
|
+
from .cli_ui import thinking
|
|
3127
|
+
|
|
3128
|
+
thinking("Indexing project for context awareness...")
|
|
3129
|
+
|
|
3130
|
+
try:
|
|
3131
|
+
result = self.cognitive_system.index_project(incremental=False)
|
|
3132
|
+
success(f"Indexed {result.get('files_indexed', 0)} files with {result.get('nodes', 0)} code nodes")
|
|
3133
|
+
except Exception as e:
|
|
3134
|
+
error(f"Error indexing project: {e}")
|
|
3135
|
+
|
|
3136
|
+
    def _brain_show_insights(self) -> None:
        """Show user insights gathered by the learning core.

        Prints activity totals, the top task categories, up to five
        learned preferences, and up to three detected patterns.
        """
        if not HAS_COGNITIVE:
            warning("Cognitive module not available")
            return

        if not self.cognitive_system:
            warning("Cognitive system not initialized")
            return

        insights = self.cognitive_system.get_user_insights()

        # The learning core reports failures via an "error" key.
        if "error" in insights:
            warning(insights["error"])
            return

        print("\n🧠 User Insights:")
        print("=" * 60)

        # Summary stats
        print(f"\n📊 Activity:")
        print(f" Total interactions: {insights.get('total_interactions', 0)}")
        print(f" Session count: {insights.get('session_count', 0)}")

        # Top categories (mapping of category -> interaction count)
        if insights.get('top_categories'):
            print(f"\n📂 Top Task Categories:")
            for cat, count in insights.get('top_categories', {}).items():
                print(f" • {cat}: {count} interactions")

        # Preferences (capped at five)
        if insights.get('preferences'):
            print(f"\n⚙️ Learned Preferences:")
            for pref in insights.get('preferences', [])[:5]:
                print(f" • {pref.get('category', 'general')}: {pref.get('preference', '')}")

        # Patterns (capped at three)
        if insights.get('patterns'):
            print(f"\n🔍 Detected Patterns:")
            for pattern in insights.get('patterns', [])[:3]:
                print(f" • {pattern.get('description', 'No description')}")
|
|
3177
|
+
|
|
3178
|
+
# =========================================================================
# Web Dashboard Methods
# =========================================================================

def _run_web_dashboard(self, port: int = 8709, serve_remote: bool = False) -> int:
    """Launch the bundled web dashboard server.

    Args:
        port: TCP port the dashboard listens on
        serve_remote: if True, the server binds to 0.0.0.0 so remote
            clients can connect (host selection happens inside run_server)

    Returns:
        Exit code: 0 after a clean shutdown, 1 on any failure
    """
    try:
        # Imported lazily: fastapi/uvicorn are optional extras and only
        # needed when the dashboard is actually requested.
        from .web import run_server
        run_server(host="127.0.0.1", port=port, serve_remote=serve_remote)
        return 0
    except ImportError as e:
        # Optional dependencies missing — tell the user how to install them.
        print("⚠️ Web dashboard dependencies not installed.")
        print(" Install with: pip install fastapi uvicorn")
        print(f" Error: {e}")
        return 1
    except Exception as e:
        error(f"Error starting web dashboard: {e}")
        return 1
|
|
3204
|
+
|
|
3205
|
+
# =========================================================================
# Shell Completions
# =========================================================================

def _generate_completion(self, shell: str) -> int:
    """Print a shell-completion script for the requested shell to stdout.

    Args:
        shell: Target shell name (bash, zsh, fish)

    Returns:
        Exit code: 0 when the script was emitted, 1 otherwise
    """
    try:
        from .shell_completions import get_completion_script
        # Emit straight to stdout so the user can source it or redirect
        # it into their shell's completion directory.
        print(get_completion_script(shell))
        return 0
    except ImportError:
        error("Shell completions module not available")
        return 1
    except Exception as e:
        error(f"Error generating completions: {e}")
        return 1
|
|
3229
|
+
|
|
3230
|
+
# =========================================================================
# AI Agents
# =========================================================================

def _run_auto_fix(self, file_path: str, auto_apply: bool = False) -> int:
    """Run auto-fix agent on a file

    Detects errors in the file via AutoFixAgent, asks it for candidate
    fixes, prints a colorized report, and optionally applies the fixes
    in place.

    Args:
        file_path: Path to file to fix
        auto_apply: Whether to auto-apply fixes

    Returns:
        Exit code (0 on success or when no errors were found, 1 on failure)
    """
    try:
        # Lazy import: the agent is an optional component.
        from .agents.auto_fix import AutoFixAgent

        with action_spinner(f"Analyzing {file_path}") as spinner:
            agent = AutoFixAgent(self.llm)
            spinner.update("Detecting errors")
            errors = agent.analyze_file(file_path)

            # Nothing to fix — report success and bail out early.
            if not errors:
                spinner.success(f"No errors found in {file_path}")
                return 0

            spinner.update(f"Found {len(errors)} error(s)")
            spinner.add_action("Analyze", file_path)

            # Generate fixes
            fixes = agent.fix_errors(errors)
            spinner.success(f"Found {len(errors)} error(s), generated {len(fixes)} fix(es)")

        # Display errors and fixes
        print(f"\n{Color.BOLD}Auto-Fix Analysis: {file_path}{Color.RESET}")
        print(f"{Color.DIM}{'─'*60}{Color.RESET}")

        for i, err in enumerate(errors, 1):
            print(f"\n{Color.RED}{Icons.FAILURE}{Color.RESET} Error {i}: {err.message}")
            print(f" {Color.DIM}Line {err.line}: {err.error_type}{Color.RESET}")

        if fixes:
            print(f"\n{Color.BOLD}Generated Fixes:{Color.RESET}")
            for i, fix in enumerate(fixes, 1):
                # Fix objects may not expose a confidence score; degrade gracefully.
                confidence = f"{fix.confidence*100:.0f}%" if hasattr(fix, 'confidence') else "N/A"
                print(f" {Color.GREEN}{Icons.SUCCESS}{Color.RESET} Fix {i}: {fix.description}")
                print(f" {Color.DIM}Confidence: {confidence}{Color.RESET}")

        if auto_apply and fixes:
            # NOTE(review): fix_file re-runs the agent on the file rather than
            # applying the `fixes` list computed above — confirm this is intended.
            print(f"\n{Color.YELLOW}Applying fixes...{Color.RESET}")
            agent.fix_file(file_path, auto_apply=True)
            success(f"Fixes applied to {file_path}")

        return 0

    except ImportError:
        error("Auto-fix agent not available")
        return 1
    except Exception as e:
        error(f"Error running auto-fix: {e}")
        return 1
|
|
3291
|
+
|
|
3292
|
+
def _run_test_generator(self, file_path: str, output_file: Optional[str] = None) -> int:
    """Run test generator agent on a file

    Finds testable functions via TestGeneratorAgent, generates tests for
    them, prints a colorized summary, and writes the resulting test file
    to disk.

    Args:
        file_path: Path to file to generate tests for
        output_file: Optional output file path

    Returns:
        Exit code (0 on success, 1 on failure)
    """
    try:
        # Lazy import: the agent is an optional component.
        from .agents.test_generator import TestGeneratorAgent

        with action_spinner(f"Analyzing {file_path}") as spinner:
            agent = TestGeneratorAgent(self.llm)
            spinner.update("Finding functions to test")
            functions = agent.analyze_file(file_path)

            # Nothing testable — report and exit successfully.
            if not functions:
                spinner.success(f"No testable functions found in {file_path}")
                return 0

            spinner.update(f"Found {len(functions)} function(s)")
            spinner.add_action("Analyze", file_path)

            # Generate tests
            tests = agent.generate_tests(functions)
            spinner.success(f"Generated {len(tests)} test(s)")

        # Display results
        print(f"\n{Color.BOLD}Test Generator: {file_path}{Color.RESET}")
        print(f"{Color.DIM}{'─'*60}{Color.RESET}")

        print(f"\n{Color.CYAN}Functions found:{Color.RESET}")
        for func in functions:
            print(f" {Icons.TREE_BRANCH} {func.name}() - line {func.line}")

        print(f"\n{Color.CYAN}Tests generated:{Color.RESET}")
        for test in tests:
            print(f" {Color.GREEN}{Icons.SUCCESS}{Color.RESET} {test.name}")

        # Write output file
        if output_file:
            agent.generate_test_file(file_path, output_file)
            success(f"Tests written to {output_file}")
        else:
            # Default output file: test_<name> next to the source file
            # (matches the common pytest discovery naming convention).
            from pathlib import Path
            p = Path(file_path)
            default_output = p.parent / f"test_{p.name}"
            agent.generate_test_file(file_path, str(default_output))
            success(f"Tests written to {default_output}")

        return 0

    except ImportError:
        error("Test generator agent not available")
        return 1
    except Exception as e:
        error(f"Error generating tests: {e}")
        return 1
|
|
3353
|
+
|
|
3354
|
+
|
|
3355
|
+
def main():
    """CLI entry point: pre-scan connection flags, build the CLI, exit with its status."""
    # Pre-parse to get remote args before creating CLI
    pre_parser = argparse.ArgumentParser(add_help=False)
    pre_parser.add_argument("--remote", metavar="URL")
    pre_parser.add_argument("--api-key", metavar="KEY")
    pre_parser.add_argument("--local", action="store_true")
    known, _unused = pre_parser.parse_known_args()

    # --local forces purely local operation (no remote at all); otherwise
    # honor --remote. A None URL falls back to DEFAULT_API_URL in __init__.
    target_url = None if known.local else known.remote

    # Create CLI with remote settings if provided
    cli = NC1709CLI(
        remote_url=target_url,
        api_key=known.api_key,
    )
    sys.exit(cli.run())
|
|
3377
|
+
|
|
3378
|
+
|
|
3379
|
+
# Script entry point when this module is executed directly.
if __name__ == "__main__":
    main()
|