cognautic-cli 1.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cognautic/__init__.py +7 -0
- cognautic/ai_engine.py +2213 -0
- cognautic/auto_continuation.py +196 -0
- cognautic/cli.py +1064 -0
- cognautic/config.py +245 -0
- cognautic/file_tagger.py +194 -0
- cognautic/memory.py +419 -0
- cognautic/provider_endpoints.py +424 -0
- cognautic/rules.py +246 -0
- cognautic/tools/__init__.py +19 -0
- cognautic/tools/base.py +59 -0
- cognautic/tools/code_analysis.py +391 -0
- cognautic/tools/command_runner.py +292 -0
- cognautic/tools/file_operations.py +394 -0
- cognautic/tools/registry.py +115 -0
- cognautic/tools/response_control.py +48 -0
- cognautic/tools/web_search.py +336 -0
- cognautic/utils.py +297 -0
- cognautic/websocket_server.py +485 -0
- cognautic_cli-1.1.1.dist-info/METADATA +604 -0
- cognautic_cli-1.1.1.dist-info/RECORD +25 -0
- cognautic_cli-1.1.1.dist-info/WHEEL +5 -0
- cognautic_cli-1.1.1.dist-info/entry_points.txt +2 -0
- cognautic_cli-1.1.1.dist-info/licenses/LICENSE +21 -0
- cognautic_cli-1.1.1.dist-info/top_level.txt +1 -0
cognautic/cli.py
ADDED
|
@@ -0,0 +1,1064 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Main CLI interface for Cognautic
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import click
|
|
6
|
+
import asyncio
|
|
7
|
+
import logging
|
|
8
|
+
import os
|
|
9
|
+
import readline
|
|
10
|
+
import signal
|
|
11
|
+
import sys
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from rich.console import Console
|
|
14
|
+
from rich.panel import Panel
|
|
15
|
+
from rich.text import Text
|
|
16
|
+
|
|
17
|
+
# Silence chatty native-library logging (gRPC, glog, TensorFlow, absl)
# via environment variables before any Google client library is imported.
os.environ.update({
    'GRPC_VERBOSITY': 'NONE',
    'GRPC_TRACE': '',
    'GLOG_minloglevel': '3',        # Suppress all Google logging
    'GLOG_logtostderr': '0',        # Don't log to stderr
    'TF_CPP_MIN_LOG_LEVEL': '3',    # Suppress TensorFlow logs
    'ABSL_LOGGING_VERBOSITY': '3',  # Suppress ABSL logs
})

# Redirect stderr to suppress C++ library logs
import sys
import io
|
|
28
|
+
|
|
29
|
+
# Stderr proxy that hides known Google AI / gRPC noise from the terminal.
class FilteredStderr:
    """File-like wrapper around a real stderr stream.

    Drops any write whose text contains a known noisy marker emitted by
    Google's native libraries (gRPC/absl); all other writes pass through
    unchanged to the wrapped stream.
    """

    def __init__(self, original_stderr):
        self.original_stderr = original_stderr
        self.buffer = ""

    def write(self, text):
        """Forward *text* to the real stderr unless it matches known noise."""
        noise_markers = (
            'ALTS creds ignored',
            'plugin_credentials.cc',
            'validate_metadata_from_plugin',
            'Plugin added invalid metadata',
            'All log messages before absl::InitializeLog',
            'INTERNAL:Illegal header value',
        )
        for marker in noise_markers:
            if marker in text:
                # Swallow the whole chunk; these lines are pure library noise.
                return
        self.original_stderr.write(text)

    def flush(self):
        """Flush the underlying stream."""
        self.original_stderr.flush()

    def isatty(self):
        """Report whether the underlying stream is a TTY."""
        return self.original_stderr.isatty()
|
|
53
|
+
|
|
54
|
+
# Install the filtered stderr so native-library noise is suppressed at the
# Python level as well as via the environment variables above.
sys.stderr = FilteredStderr(sys.stderr)

# Quiet the noisiest third-party loggers individually.
for _logger_name, _level in (
    ('google', logging.CRITICAL),
    ('google.generativeai', logging.CRITICAL),
    ('websockets', logging.ERROR),
    ('asyncio', logging.ERROR),
    ('grpc', logging.CRITICAL),
):
    logging.getLogger(_logger_name).setLevel(_level)

# Set root logger to WARNING to suppress debug messages
logging.basicConfig(level=logging.WARNING)
|
|
65
|
+
|
|
66
|
+
from .config import ConfigManager
|
|
67
|
+
from .ai_engine import AIEngine
|
|
68
|
+
from .websocket_server import WebSocketServer
|
|
69
|
+
from .memory import MemoryManager
|
|
70
|
+
from .rules import RulesManager
|
|
71
|
+
|
|
72
|
+
# Shared Rich console used by every command for styled terminal output.
console = Console()

# Global flag to stop AI response; set by the SIGQUIT (Ctrl+X) handler and
# polled by the streaming loop inside the chat command.
stop_response = False
|
|
76
|
+
|
|
77
|
+
def signal_handler(signum, frame):
    """Handle Ctrl+X (SIGQUIT) to stop AI response.

    Args:
        signum: Signal number delivered by the OS (expected: SIGQUIT).
        frame: Interrupted stack frame (unused).
    """
    global stop_response
    # The chat streaming loop checks this flag between chunks/characters
    # and aborts the in-flight AI response when it becomes True.
    stop_response = True
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
@click.group(invoke_without_command=True)
@click.version_option(version="1.1.1", prog_name="Cognautic CLI")
@click.pass_context
def main(ctx):
    """Cognautic CLI - AI-powered development assistant"""
    if ctx.invoked_subcommand is None:
        # No subcommand provided, start interactive chat as the default action
        ctx.invoke(chat)
|
|
93
|
+
|
|
94
|
+
@main.command()
@click.option('--provider', help='AI provider to configure')
@click.option('--api-key', help='API key for the provider')
@click.option('--interactive', is_flag=True, help='Interactive setup mode')
def setup(provider, api_key, interactive):
    """Initialize Cognautic CLI and configure API keys"""
    # NOTE(review): emoji glyphs below were mangled in extraction and have
    # been reconstructed — confirm against the upstream source.
    console.print(Panel.fit("🚀 Cognautic CLI Setup", style="bold blue"))

    config_manager = ConfigManager()

    if interactive:
        # Guided, prompt-based setup handled entirely by ConfigManager.
        config_manager.interactive_setup()
    elif provider and api_key:
        # Non-interactive: store the key for the named provider directly.
        config_manager.set_api_key(provider, api_key)
        console.print(f"✅ API key for {provider} configured successfully", style="green")
    else:
        # Neither a full provider/key pair nor interactive mode was given.
        console.print("❌ Please provide --provider and --api-key, or use --interactive", style="red")
|
|
111
|
+
|
|
112
|
+
@main.command()
@click.option('--provider', help='AI provider to use')
@click.option('--model', help='Specific model to use')
@click.option('--project-path', type=click.Path(exists=True), help='Project path to work with')
@click.option('--websocket-port', default=8765, help='WebSocket server port')
@click.option('--session', help='Session ID to continue')
def chat(provider, model, project_path, websocket_port, session):
    """Start interactive chat session with AI agent"""
    # NOTE(review): the banner and emoji glyphs in this function were mangled
    # by extraction ("ā..." runs were box-drawing art spelling COGNAUTIC, and
    # "š"-prefixed strings were emoji) — reconstructed; confirm upstream.
    ascii_art = """
āāāāāāā āāāāāāā āāāāāāā āāāā āāā āāāāāā āāā āāāāāāāāāāāāāāā āāāāāāā
āāāāāāāāāāāāāāāāāāāāāāāāā āāāāā āāāāāāāāāāāāāā āāāāāāāāāāāāāāāāāāāāāāā
āāā āāā āāāāāā āāāāāāāāāā āāāāāāāāāāāāāā āāā āāā āāāāāā
āāā āāā āāāāāā āāāāāāāāāāāāāāāāāāāāāāāā āāā āāā āāāāāā
āāāāāāāāāāāāāāāāāāāāāāāāāāāāā āāāāāāāāā āāāāāāāāāāāā āāā āāāāāāāāāāā
āāāāāāā āāāāāāā āāāāāāā āāā āāāāāāāā āāā āāāāāāā āāā āāā āāāāāāā
"""
    console.print(ascii_art, style="bold green")

    # Handle Sentinel objects from Click: when chat is ctx.invoke()d from
    # main, options may arrive as non-string sentinels — normalize to None.
    if not isinstance(provider, str):
        provider = None
    if not isinstance(model, str):
        model = None
    if not isinstance(project_path, (str, type(None))):
        project_path = None
    if not isinstance(session, str):
        session = None

    config_manager = ConfigManager()
    memory_manager = MemoryManager()

    # Check if any providers are configured; if not, run first-time setup.
    available_providers = config_manager.list_providers()
    if not available_providers:
        console.print("🔧 No API keys configured. Let's set up Cognautic CLI first!", style="yellow")
        config_manager.interactive_setup()
        available_providers = config_manager.list_providers()
        if not available_providers:
            console.print("❌ Setup cancelled. Cannot start chat without API keys.", style="red")
            return

    # Handle session loading: a resumed session supplies provider/model/
    # workspace defaults for anything not given on the command line.
    if session:
        if memory_manager.load_session(session):
            current_session = memory_manager.get_current_session()
            if not provider:
                provider = current_session.provider
            if not model:
                model = current_session.model
            if not project_path:
                project_path = current_session.workspace
        else:
            # load_session failed (presumably it reported the error); abort.
            return

    # Use provided provider or last used provider or default.
    if not provider or not isinstance(provider, str):
        provider = config_manager.get_config_value('last_provider') or config_manager.get_config_value('default_provider') or available_providers[0]

    if not config_manager.has_api_key(provider):
        console.print(f"❌ No API key found for {provider}. Available providers: {', '.join(available_providers)}", style="red")
        return

    # Load saved model for the provider if no model was specified.
    if not model or not isinstance(model, str):
        saved_model = config_manager.get_provider_model(provider)
        if saved_model:
            model = saved_model

    ai_engine = AIEngine(config_manager)

    # Start WebSocket server in background (task created inside run_chat).
    websocket_server = WebSocketServer(ai_engine, port=websocket_port)

    async def run_chat():
        # Interactive REPL: reads user lines, dispatches slash commands, and
        # streams AI responses with a typewriter effect until 'exit'/Ctrl+C.
        global stop_response

        # Set up Ctrl+X signal handler (SIGQUIT) so users can abort a
        # streaming response without killing the session.
        signal.signal(signal.SIGQUIT, signal_handler)

        # Start WebSocket server alongside the REPL.
        server_task = asyncio.create_task(websocket_server.start())

        try:
            console.print("💡 Type '/help' for commands, 'exit' to quit")
            if project_path:
                console.print(f"📁 Working in: {project_path}")

            # Only show session info if continuing an existing session.
            if memory_manager.get_current_session():
                current_session = memory_manager.get_current_session()
                console.print(f"📝 Continuing session: {current_session.session_id} - {current_session.title}")
                # Show recent conversation history (last three messages).
                history = memory_manager.get_conversation_history(limit=3)
                if history:
                    console.print("\n[dim]Recent conversation:[/dim]")
                    for msg in history[-3:]:
                        role_color = "cyan" if msg.role == "user" else "magenta"
                        console.print(f"[{role_color}]{msg.role.title()}:[/{role_color}] {msg.content[:100]}{'...' if len(msg.content) > 100 else ''}")

            console.print("-" * 50)

            # Set workspace - use provided project_path or current working directory.
            current_workspace = project_path or os.getcwd()
            # Load saved provider/model from config if not specified.
            saved_provider = config_manager.get_config_value('last_provider')
            saved_model = config_manager.get_config_value('last_model')

            current_model = model or saved_model  # Track current model in this scope
            current_provider = provider or saved_provider or 'openai'  # Track current provider in this scope
            session_created = False  # Track if session has been created

            # Store the original working directory where cognautic was run;
            # relative /workspace paths are resolved against it.
            original_cwd = os.getcwd()

            # Setup readline for command history and arrow keys.
            history_file = Path.home() / ".cognautic" / ".chat_history"
            history_file.parent.mkdir(parents=True, exist_ok=True)

            try:
                readline.read_history_file(str(history_file))
            except FileNotFoundError:
                # First run — no history yet.
                pass

            # Set history length.
            readline.set_history_length(1000)

            # Show current workspace.
            console.print(f"📁 Workspace: {current_workspace}")

            while True:
                try:
                    # Show current workspace in prompt.
                    workspace_info = f" [{Path(current_workspace).name}]" if current_workspace else ""

                    # \001 and \002 tell readline the enclosed ANSI escape
                    # codes are non-printing, preventing cursor-position bugs
                    # when editing the line. \033[1;36m = bold cyan, \033[0m = reset.
                    prompt = f"\001\033[1;36m\002You{workspace_info}:\001\033[0m\002 "
                    user_input = input(prompt)

                    if user_input.lower() in ['exit', 'quit']:
                        break

                    # Handle slash commands before any session bookkeeping.
                    if user_input.startswith('/'):
                        # Mutable context dict: slash commands write updated
                        # workspace/model/provider back into it.
                        context = {
                            'current_workspace': current_workspace,
                            'model': current_model,
                            'provider': current_provider,
                            'memory_manager': memory_manager,
                            'original_cwd': original_cwd,
                            'config_manager': config_manager
                        }
                        result = await handle_slash_command(user_input, config_manager, ai_engine, context)
                        if result:
                            # Update local variables from context.
                            old_workspace = current_workspace
                            current_workspace = context.get('current_workspace', current_workspace)
                            current_model = context.get('model', current_model)
                            current_provider = context.get('provider', current_provider)

                            # If workspace changed, update the session.
                            if old_workspace != current_workspace and memory_manager.get_current_session():
                                memory_manager.update_session_info(workspace=current_workspace)

                            continue
                        else:
                            # Falsy result means the command requested exit.
                            break

                    # Create session only when user sends first actual
                    # message (not slash command), so empty runs leave no trace.
                    if not session_created and not memory_manager.get_current_session():
                        session_id = memory_manager.create_session(
                            provider=current_provider,
                            model=current_model,
                            workspace=current_workspace
                        )
                        session_created = True

                    # Add user message to memory.
                    memory_manager.add_message("user", user_input)

                    # Update session title if this is the first message.
                    current_session = memory_manager.get_current_session()
                    if current_session and current_session.message_count == 1:
                        title = memory_manager.generate_session_title(user_input)
                        memory_manager.update_session_info(title=title)

                    # Process user input with AI (including conversation history).
                    console.print(f"[dim]Processing with {current_provider}, model: {current_model or 'default'}...[/dim]")

                    # Get conversation history for context.
                    conversation_history = memory_manager.get_context_for_ai(limit=10)

                    # Get typing speed from config (default: fast); numeric
                    # config values are used directly as a per-char delay.
                    typing_speed = config_manager.get_config_value('typing_speed') or 'fast'
                    speed_map = {
                        'instant': 0,
                        'fast': 0.001,
                        'normal': 0.005,
                        'slow': 0.01
                    }
                    typing_delay = speed_map.get(typing_speed, 0.001) if isinstance(typing_speed, str) else float(typing_speed)

                    # Stream the response character by character.
                    console.print("[bold magenta]AI:[/bold magenta] ", end="")
                    full_response = ""
                    response_stopped = False
                    stop_response = False  # Reset stop flag for this response

                    try:
                        async for chunk in ai_engine.process_message_stream(
                            user_input,
                            provider=current_provider,
                            model=current_model,
                            project_path=current_workspace,
                            conversation_history=conversation_history
                        ):
                            # Check if stop was requested between chunks.
                            if stop_response:
                                response_stopped = True
                                console.print("\n\n[yellow]⏸️ AI response stopped by user (Ctrl+X)[/yellow]")
                                break

                            # Display character by character for typewriter effect.
                            for char in chunk:
                                # Check stop flag during character display too.
                                if stop_response:
                                    response_stopped = True
                                    console.print("\n\n[yellow]⏸️ AI response stopped by user (Ctrl+X)[/yellow]")
                                    break
                                console.print(char, end="")
                                full_response += char
                                if typing_delay > 0:
                                    await asyncio.sleep(typing_delay)

                            if response_stopped:
                                break
                    except KeyboardInterrupt:
                        # Ctrl+C pressed mid-stream - exit chat entirely.
                        break

                    console.print()  # New line after streaming

                    # Add AI response to memory (even if stopped part-way).
                    if full_response:
                        memory_manager.add_message("assistant", full_response)

                    # Don't break - continue to next prompt.

                except KeyboardInterrupt:
                    # Ctrl+C pressed while waiting for user input - exit chat.
                    break
                except Exception as e:
                    # Surface the error but keep the REPL alive.
                    console.print(f"❌ Error: {str(e)}", style="red")

        finally:
            # Save command history (best-effort).
            try:
                readline.write_history_file(str(history_file))
            except Exception:
                pass

            server_task.cancel()
            console.print("👋 Chat session ended")

    asyncio.run(run_chat())
|
|
381
|
+
|
|
382
|
+
|
|
383
|
+
@main.command()
@click.argument('action')
@click.argument('key', required=False)
@click.argument('value', required=False)
def config(action, key, value):
    """Manage configuration and API keys"""
    # NOTE(review): emoji glyphs reconstructed — originals were mangled in
    # extraction; confirm against upstream source.
    config_manager = ConfigManager()

    if action == 'list':
        # Dump the entire configuration as pretty-printed JSON.
        config_data = config_manager.get_config()
        console.print_json(data=config_data)
    elif action == 'set' and key and value:
        config_manager.set_config(key, value)
        console.print(f"✅ Set {key} = {value}", style="green")
    elif action == 'get' and key:
        value = config_manager.get_config_value(key)
        console.print(f"{key} = {value}")
    elif action == 'delete' and key:
        config_manager.delete_config(key)
        console.print(f"✅ Deleted {key}", style="green")
    elif action == 'reset':
        config_manager.reset_config()
        console.print("✅ Configuration reset to defaults", style="green")
    else:
        # Unknown action, or a required key/value argument was omitted.
        console.print("❌ Invalid config action. Use: list, set <key> <value>, get <key>, delete <key>, reset", style="red")
|
|
408
|
+
|
|
409
|
+
@main.command()
def providers():
    """List all available AI providers and their endpoints"""
    # Imported lazily so the provider table is only loaded for this command.
    from .provider_endpoints import PROVIDER_ENDPOINTS, get_all_providers

    # NOTE(review): emoji/bullet glyphs reconstructed — originals were
    # mangled in extraction; confirm against upstream source.
    console.print(Panel.fit("🤖 Available AI Providers & Endpoints", style="bold blue"))

    for provider_name in get_all_providers():
        config = PROVIDER_ENDPOINTS[provider_name]

        console.print(f"\n[bold cyan]{provider_name.upper()}[/bold cyan]")
        console.print(f"Base URL: [green]{config['base_url']}[/green]")

        # List endpoints — any config key ending in '_endpoint' is one.
        endpoints = {k: v for k, v in config.items() if k.endswith('_endpoint')}
        if endpoints:
            console.print("Endpoints:")
            for endpoint_name, endpoint_path in endpoints.items():
                # e.g. 'chat_completions_endpoint' -> 'Chat Completions'
                clean_name = endpoint_name.replace('_endpoint', '').replace('_', ' ').title()
                console.print(f" • {clean_name}: [yellow]{endpoint_path}[/yellow]")

        # Show auth method: first header that looks auth-related.
        if 'headers' in config:
            auth_header = None
            for key, value in config['headers'].items():
                if 'api_key' in value.lower() or 'authorization' in key.lower():
                    auth_header = f"{key}: {value}"
                    break
            if auth_header:
                console.print(f"Auth: [dim]{auth_header}[/dim]")

        # Some providers authenticate via a URL query parameter instead.
        if 'auth_param' in config:
            console.print(f"Auth: [dim]URL parameter: {config['auth_param']}[/dim]")
|
|
442
|
+
|
|
443
|
+
@main.command()
@click.argument('action', required=False)
@click.argument('rule_type', required=False)
@click.argument('args', nargs=-1, required=False)
@click.option('--workspace', '-w', help='Workspace path for workspace rules')
def rules(action, rule_type, args, workspace):
    """Manage global and workspace rules for AI behavior

    Examples:
        cognautic rules                                    # List all rules
        cognautic rules add global "Use type hints"        # Add global rule
        cognautic rules add workspace "Follow PEP 8" -w .  # Add workspace rule
        cognautic rules remove global 0                    # Remove global rule by index
        cognautic rules clear workspace -w .               # Clear workspace rules
    """
    # NOTE(review): emoji glyphs reconstructed — originals were mangled in
    # extraction; confirm against upstream source.
    rules_manager = RulesManager()

    if not action:
        # No action: display all rules plus a usage cheat sheet.
        rules_manager.display_rules(workspace)
        console.print("\n💡 Usage:")
        console.print("  cognautic rules add global <rule> [description]")
        console.print("  cognautic rules add workspace <rule> [description] -w <path>")
        console.print("  cognautic rules remove global <index>")
        console.print("  cognautic rules remove workspace <index> -w <path>")
        console.print("  cognautic rules clear global")
        console.print("  cognautic rules clear workspace -w <path>")
        return

    if action == "add":
        if not rule_type or not args:
            console.print("❌ Usage: cognautic rules add <global|workspace> <rule> [description]", style="red")
            return

        # Join all args as the rule text (allows unquoted multi-word rules).
        rule_text = " ".join(args)

        if rule_type == "global":
            rules_manager.add_global_rule(rule_text)
        elif rule_type == "workspace":
            # Workspace rules default to the current directory.
            if not workspace:
                workspace = os.getcwd()
            rules_manager.add_workspace_rule(rule_text, workspace_path=workspace)
        else:
            console.print("❌ Rule type must be 'global' or 'workspace'", style="red")

    elif action == "remove":
        if not rule_type or not args:
            console.print("❌ Usage: cognautic rules remove <global|workspace> <index>", style="red")
            return

        try:
            # First positional arg after the type is the rule's index.
            index = int(args[0])
            if rule_type == "global":
                rules_manager.remove_global_rule(index)
            elif rule_type == "workspace":
                if not workspace:
                    workspace = os.getcwd()
                rules_manager.remove_workspace_rule(index, workspace)
            else:
                console.print("❌ Rule type must be 'global' or 'workspace'", style="red")
        except (ValueError, IndexError):
            console.print("❌ Index must be a valid number", style="red")

    elif action == "clear":
        if not rule_type:
            console.print("❌ Usage: cognautic rules clear <global|workspace>", style="red")
            return

        if rule_type == "global":
            rules_manager.clear_global_rules()
        elif rule_type == "workspace":
            if not workspace:
                workspace = os.getcwd()
            rules_manager.clear_workspace_rules(workspace)
        else:
            console.print("❌ Rule type must be 'global' or 'workspace'", style="red")

    else:
        console.print(f"❌ Unknown action: {action}", style="red")
        console.print("Valid actions: add, remove, clear")
|
|
524
|
+
|
|
525
|
+
async def handle_slash_command(command, config_manager, ai_engine, context):
|
|
526
|
+
"""Handle slash commands in chat mode"""
|
|
527
|
+
parts = command[1:].split()
|
|
528
|
+
cmd = parts[0].lower() if parts else ""
|
|
529
|
+
|
|
530
|
+
if cmd == "help":
|
|
531
|
+
show_help()
|
|
532
|
+
return True
|
|
533
|
+
|
|
534
|
+
elif cmd == "workspace" or cmd == "ws":
|
|
535
|
+
if len(parts) < 2:
|
|
536
|
+
current = context.get('current_workspace')
|
|
537
|
+
if current:
|
|
538
|
+
console.print(f"š Current workspace: {current}")
|
|
539
|
+
else:
|
|
540
|
+
console.print("š No workspace set")
|
|
541
|
+
console.print("Usage: /workspace <path> or /ws <path>")
|
|
542
|
+
else:
|
|
543
|
+
path_input = parts[1]
|
|
544
|
+
|
|
545
|
+
# First expand user home directory
|
|
546
|
+
new_path = Path(path_input).expanduser()
|
|
547
|
+
|
|
548
|
+
# Then handle relative vs absolute paths
|
|
549
|
+
if not new_path.is_absolute():
|
|
550
|
+
# Relative path from original working directory
|
|
551
|
+
original_cwd = context.get('original_cwd', os.getcwd())
|
|
552
|
+
new_path = Path(original_cwd) / new_path
|
|
553
|
+
|
|
554
|
+
# Resolve to absolute path
|
|
555
|
+
new_path = new_path.resolve()
|
|
556
|
+
|
|
557
|
+
if new_path.exists() and new_path.is_dir():
|
|
558
|
+
context['current_workspace'] = str(new_path)
|
|
559
|
+
console.print(f"ā
Workspace changed to: {new_path}")
|
|
560
|
+
console.print(f"š” AI will now create files in this directory")
|
|
561
|
+
else:
|
|
562
|
+
console.print(f"ā Directory not found: {new_path}", style="red")
|
|
563
|
+
return True
|
|
564
|
+
|
|
565
|
+
elif cmd == "setup":
|
|
566
|
+
console.print("š§ Running setup wizard...")
|
|
567
|
+
config_manager.interactive_setup()
|
|
568
|
+
return True
|
|
569
|
+
|
|
570
|
+
elif cmd == "config":
|
|
571
|
+
if len(parts) < 2:
|
|
572
|
+
console.print("Available config commands:")
|
|
573
|
+
console.print("⢠/config list - Show current configuration")
|
|
574
|
+
console.print("⢠/config providers - Show available providers")
|
|
575
|
+
console.print("⢠/config set <key> <value> - Set configuration value")
|
|
576
|
+
elif parts[1] == "list":
|
|
577
|
+
config_data = config_manager.get_config()
|
|
578
|
+
console.print_json(data=config_data)
|
|
579
|
+
elif parts[1] == "providers":
|
|
580
|
+
providers = config_manager.list_providers()
|
|
581
|
+
console.print(f"Available providers: {', '.join(providers) if providers else 'None'}")
|
|
582
|
+
elif parts[1] == "set" and len(parts) >= 4:
|
|
583
|
+
config_manager.set_config(parts[2], parts[3])
|
|
584
|
+
console.print(f"ā
Set {parts[2]} = {parts[3]}")
|
|
585
|
+
return True
|
|
586
|
+
|
|
587
|
+
elif cmd == "provider":
|
|
588
|
+
if len(parts) < 2:
|
|
589
|
+
current_provider = context.get('provider')
|
|
590
|
+
console.print(f"Current provider: {current_provider}")
|
|
591
|
+
providers = config_manager.list_providers()
|
|
592
|
+
# Add local provider if available
|
|
593
|
+
if 'local' in ai_engine.providers:
|
|
594
|
+
providers.append('local')
|
|
595
|
+
console.print(f"Available providers: {', '.join(providers)}")
|
|
596
|
+
console.print("Usage: /provider <provider_name>")
|
|
597
|
+
else:
|
|
598
|
+
new_provider = parts[1]
|
|
599
|
+
# Check if it's the local provider or has an API key
|
|
600
|
+
if new_provider == 'local':
|
|
601
|
+
# Check if local model is configured
|
|
602
|
+
local_model_path = config_manager.get_config_value('local_model_path')
|
|
603
|
+
if local_model_path:
|
|
604
|
+
# Load the local model if not already loaded
|
|
605
|
+
if 'local' not in ai_engine.providers:
|
|
606
|
+
try:
|
|
607
|
+
console.print(f"š Loading local model from: {local_model_path}")
|
|
608
|
+
ai_engine.load_local_model(local_model_path)
|
|
609
|
+
console.print("ā
Local model loaded successfully!")
|
|
610
|
+
except Exception as e:
|
|
611
|
+
console.print(f"ā Error loading local model: {e}", style="red")
|
|
612
|
+
return True
|
|
613
|
+
|
|
614
|
+
context['provider'] = new_provider
|
|
615
|
+
current_provider = new_provider # Update current provider
|
|
616
|
+
# Save the provider choice
|
|
617
|
+
config_manager.set_config('last_provider', new_provider)
|
|
618
|
+
# Load saved model for this provider
|
|
619
|
+
saved_model = config_manager.get_provider_model(new_provider)
|
|
620
|
+
context['model'] = saved_model
|
|
621
|
+
current_model = saved_model # Update current model
|
|
622
|
+
console.print(f"ā
Switched to provider: {new_provider}")
|
|
623
|
+
if saved_model:
|
|
624
|
+
console.print(f"š Using saved model: {saved_model}")
|
|
625
|
+
else:
|
|
626
|
+
console.print("ā No local model configured", style="red")
|
|
627
|
+
console.print("š” Use /lmodel <path> to load a local model first", style="yellow")
|
|
628
|
+
elif config_manager.has_api_key(new_provider):
|
|
629
|
+
context['provider'] = new_provider
|
|
630
|
+
current_provider = new_provider # Update current provider
|
|
631
|
+
# Save the provider choice
|
|
632
|
+
config_manager.set_config('last_provider', new_provider)
|
|
633
|
+
# Load saved model for this provider
|
|
634
|
+
saved_model = config_manager.get_provider_model(new_provider)
|
|
635
|
+
context['model'] = saved_model
|
|
636
|
+
current_model = saved_model # Update current model
|
|
637
|
+
console.print(f"ā
Switched to provider: {new_provider}")
|
|
638
|
+
if saved_model:
|
|
639
|
+
console.print(f"š Using saved model: {saved_model}")
|
|
640
|
+
else:
|
|
641
|
+
console.print(f"ā Provider {new_provider} not configured", style="red")
|
|
642
|
+
if new_provider == 'local':
|
|
643
|
+
console.print("š” Use /lmodel <path> to load a local model first", style="yellow")
|
|
644
|
+
return True
|
|
645
|
+
|
|
646
|
+
elif cmd == "model" or cmd == "models":
|
|
647
|
+
current_provider = context.get('provider')
|
|
648
|
+
if not current_provider:
|
|
649
|
+
console.print("ā No provider selected", style="red")
|
|
650
|
+
return True
|
|
651
|
+
|
|
652
|
+
# Check if user wants to list models (fetch from API)
|
|
653
|
+
if len(parts) >= 2 and parts[1] == "list":
|
|
654
|
+
console.print(f"š Fetching available models from {current_provider}...")
|
|
655
|
+
|
|
656
|
+
try:
|
|
657
|
+
# Import API client
|
|
658
|
+
from .provider_endpoints import GenericAPIClient, get_provider_config
|
|
659
|
+
|
|
660
|
+
# Get API key
|
|
661
|
+
config_manager = context.get('config_manager')
|
|
662
|
+
api_key = config_manager.get_api_key(current_provider)
|
|
663
|
+
|
|
664
|
+
if not api_key:
|
|
665
|
+
console.print(f"ā No API key configured for {current_provider}", style="red")
|
|
666
|
+
console.print(f"š” Run /setup to configure your API key")
|
|
667
|
+
return True
|
|
668
|
+
|
|
669
|
+
# Create API client and fetch models
|
|
670
|
+
client = GenericAPIClient(current_provider, api_key)
|
|
671
|
+
|
|
672
|
+
# Check if provider has models endpoint
|
|
673
|
+
provider_config = get_provider_config(current_provider)
|
|
674
|
+
if 'models_endpoint' not in provider_config:
|
|
675
|
+
console.print(f"ā¹ļø {current_provider} doesn't provide a models API endpoint")
|
|
676
|
+
console.print(f"š” Check the provider's documentation for available models:")
|
|
677
|
+
|
|
678
|
+
# Show documentation links
|
|
679
|
+
docs = {
|
|
680
|
+
'openai': 'https://platform.openai.com/docs/models',
|
|
681
|
+
'anthropic': 'https://docs.anthropic.com/claude/docs/models-overview',
|
|
682
|
+
'google': 'https://ai.google.dev/gemini-api/docs/models/gemini',
|
|
683
|
+
'together': 'https://docs.together.ai/docs/inference-models',
|
|
684
|
+
'openrouter': 'https://openrouter.ai/models',
|
|
685
|
+
'groq': 'https://console.groq.com/docs/models',
|
|
686
|
+
'mistral': 'https://docs.mistral.ai/getting-started/models/',
|
|
687
|
+
'deepseek': 'https://platform.deepseek.com/api-docs/',
|
|
688
|
+
'perplexity': 'https://docs.perplexity.ai/docs/model-cards',
|
|
689
|
+
'cohere': 'https://docs.cohere.com/docs/models',
|
|
690
|
+
}
|
|
691
|
+
|
|
692
|
+
if current_provider in docs:
|
|
693
|
+
console.print(f" {docs[current_provider]}")
|
|
694
|
+
|
|
695
|
+
return True
|
|
696
|
+
|
|
697
|
+
# Fetch models from API
|
|
698
|
+
import asyncio
|
|
699
|
+
import nest_asyncio
|
|
700
|
+
|
|
701
|
+
# Allow nested event loops
|
|
702
|
+
try:
|
|
703
|
+
nest_asyncio.apply()
|
|
704
|
+
except:
|
|
705
|
+
pass
|
|
706
|
+
|
|
707
|
+
# Try to get models
|
|
708
|
+
try:
|
|
709
|
+
result = asyncio.run(client.list_models())
|
|
710
|
+
except RuntimeError:
|
|
711
|
+
# If we're in an event loop, use a workaround
|
|
712
|
+
import concurrent.futures
|
|
713
|
+
with concurrent.futures.ThreadPoolExecutor() as executor:
|
|
714
|
+
future = executor.submit(asyncio.run, client.list_models())
|
|
715
|
+
result = future.result(timeout=10)
|
|
716
|
+
|
|
717
|
+
if 'data' in result:
|
|
718
|
+
models = result['data']
|
|
719
|
+
console.print(f"\n[bold cyan]Available models for {current_provider}:[/bold cyan]")
|
|
720
|
+
for i, model in enumerate(models[:20], 1): # Show first 20
|
|
721
|
+
model_id = model.get('id') or model.get('name') or str(model)
|
|
722
|
+
console.print(f" {i}. {model_id}")
|
|
723
|
+
|
|
724
|
+
if len(models) > 20:
|
|
725
|
+
console.print(f"\n... and {len(models) - 20} more models")
|
|
726
|
+
|
|
727
|
+
console.print(f"\nš” Use: /model <model_name> to switch")
|
|
728
|
+
elif 'models' in result:
|
|
729
|
+
models = result['models']
|
|
730
|
+
console.print(f"\n[bold cyan]Available models for {current_provider}:[/bold cyan]")
|
|
731
|
+
for i, model in enumerate(models[:20], 1):
|
|
732
|
+
model_name = model.get('name', '').replace('models/', '')
|
|
733
|
+
console.print(f" {i}. {model_name}")
|
|
734
|
+
|
|
735
|
+
if len(models) > 20:
|
|
736
|
+
console.print(f"\n... and {len(models) - 20} more models")
|
|
737
|
+
|
|
738
|
+
console.print(f"\nš” Use: /model <model_name> to switch")
|
|
739
|
+
else:
|
|
740
|
+
console.print(f"ā ļø Unexpected response format from {current_provider}")
|
|
741
|
+
console.print(f"š” Check the provider's documentation for available models")
|
|
742
|
+
|
|
743
|
+
except Exception as e:
|
|
744
|
+
console.print(f"ā Error fetching models: {str(e)}", style="red")
|
|
745
|
+
console.print(f"š” Check the provider's documentation for available models")
|
|
746
|
+
|
|
747
|
+
elif len(parts) < 2:
|
|
748
|
+
# Show current model and hint
|
|
749
|
+
current_model = context.get('model')
|
|
750
|
+
console.print(f"š Current model: {current_model or 'default'}")
|
|
751
|
+
console.print(f"š Provider: {current_provider}")
|
|
752
|
+
console.print(f"\nš” Use: /model list - to fetch available models from API")
|
|
753
|
+
console.print(f"š” Use: /model <model_name> - to switch model")
|
|
754
|
+
else:
|
|
755
|
+
# Switch to new model
|
|
756
|
+
new_model = parts[1]
|
|
757
|
+
context['model'] = new_model
|
|
758
|
+
|
|
759
|
+
# Save the model preference for the current provider and globally
|
|
760
|
+
current_provider = context.get('provider')
|
|
761
|
+
config_manager = context.get('config_manager')
|
|
762
|
+
if config_manager:
|
|
763
|
+
# Save as last used model globally
|
|
764
|
+
config_manager.set_config('last_model', new_model)
|
|
765
|
+
# Save as default for this provider
|
|
766
|
+
if current_provider:
|
|
767
|
+
config_manager.set_provider_model(current_provider, new_model)
|
|
768
|
+
console.print(f"ā
Switched to model: {new_model}")
|
|
769
|
+
console.print(f"š¾ Model saved as default for provider: {current_provider}")
|
|
770
|
+
else:
|
|
771
|
+
console.print(f"ā
Switched to model: {new_model}")
|
|
772
|
+
else:
|
|
773
|
+
console.print(f"ā
Switched to model: {new_model}")
|
|
774
|
+
return True
|
|
775
|
+
|
|
776
|
+
elif cmd == "session":
|
|
777
|
+
memory_manager = context.get('memory_manager')
|
|
778
|
+
if not memory_manager:
|
|
779
|
+
console.print("ā Memory manager not available", style="red")
|
|
780
|
+
return True
|
|
781
|
+
|
|
782
|
+
if len(parts) < 2:
|
|
783
|
+
# Show current session info and list available sessions
|
|
784
|
+
current_session = memory_manager.get_current_session()
|
|
785
|
+
if current_session:
|
|
786
|
+
console.print(f"š Current session: {current_session.session_id} - {current_session.title}")
|
|
787
|
+
console.print(f" Created: {current_session.created_at}")
|
|
788
|
+
console.print(f" Messages: {current_session.message_count}")
|
|
789
|
+
console.print(f" Provider: {current_session.provider}")
|
|
790
|
+
if current_session.model:
|
|
791
|
+
console.print(f" Model: {current_session.model}")
|
|
792
|
+
else:
|
|
793
|
+
console.print("š No active session")
|
|
794
|
+
|
|
795
|
+
console.print("\nAvailable commands:")
|
|
796
|
+
console.print("⢠/session list - List all sessions")
|
|
797
|
+
console.print("⢠/session new - Create new session")
|
|
798
|
+
console.print("⢠/session load <id> - Load existing session")
|
|
799
|
+
console.print("⢠/session delete <id> - Delete session")
|
|
800
|
+
console.print("⢠/session export <id> [format] - Export session")
|
|
801
|
+
console.print("⢠/session title <new_title> - Update session title")
|
|
802
|
+
|
|
803
|
+
elif parts[1] == "list":
|
|
804
|
+
sessions = memory_manager.list_sessions()
|
|
805
|
+
if not sessions:
|
|
806
|
+
console.print("š No sessions found")
|
|
807
|
+
else:
|
|
808
|
+
console.print(f"š Found {len(sessions)} sessions:")
|
|
809
|
+
for session in sessions[:10]: # Show last 10 sessions
|
|
810
|
+
console.print(f" {session.session_id} - {session.title}")
|
|
811
|
+
console.print(f" {session.message_count} messages, {session.provider}")
|
|
812
|
+
console.print(f" Last updated: {session.last_updated}")
|
|
813
|
+
console.print()
|
|
814
|
+
|
|
815
|
+
elif parts[1] == "new":
|
|
816
|
+
# Create new session (will replace current one)
|
|
817
|
+
current_provider = context.get('provider', 'openai')
|
|
818
|
+
current_model = context.get('model')
|
|
819
|
+
current_workspace = context.get('current_workspace')
|
|
820
|
+
|
|
821
|
+
session_id = memory_manager.create_session(
|
|
822
|
+
provider=current_provider,
|
|
823
|
+
model=current_model,
|
|
824
|
+
workspace=current_workspace
|
|
825
|
+
)
|
|
826
|
+
|
|
827
|
+
elif parts[1] == "load" and len(parts) >= 3:
|
|
828
|
+
session_id = parts[2]
|
|
829
|
+
if memory_manager.load_session(session_id):
|
|
830
|
+
# Update context with session info
|
|
831
|
+
current_session = memory_manager.get_current_session()
|
|
832
|
+
context['provider'] = current_session.provider
|
|
833
|
+
context['model'] = current_session.model
|
|
834
|
+
context['current_workspace'] = current_session.workspace
|
|
835
|
+
|
|
836
|
+
elif parts[1] == "delete" and len(parts) >= 3:
|
|
837
|
+
session_id = parts[2]
|
|
838
|
+
memory_manager.delete_session(session_id)
|
|
839
|
+
|
|
840
|
+
elif parts[1] == "export" and len(parts) >= 3:
|
|
841
|
+
session_id = parts[2]
|
|
842
|
+
format_type = parts[3] if len(parts) >= 4 else "json"
|
|
843
|
+
memory_manager.export_session(session_id, format_type)
|
|
844
|
+
|
|
845
|
+
elif parts[1] == "title" and len(parts) >= 3:
|
|
846
|
+
new_title = " ".join(parts[2:])
|
|
847
|
+
memory_manager.update_session_info(title=new_title)
|
|
848
|
+
console.print(f"ā
Updated session title to: {new_title}")
|
|
849
|
+
|
|
850
|
+
else:
|
|
851
|
+
console.print("ā Invalid session command. Use /session for help.", style="red")
|
|
852
|
+
|
|
853
|
+
return True
|
|
854
|
+
|
|
855
|
+
elif cmd == "lmodel":
|
|
856
|
+
if len(parts) < 2:
|
|
857
|
+
# Show current local model status
|
|
858
|
+
local_model_path = config_manager.get_config_value('local_model_path')
|
|
859
|
+
if local_model_path:
|
|
860
|
+
console.print(f"š¤ Current local model: {local_model_path}")
|
|
861
|
+
console.print("š” Use: /lmodel <path> - to load a different model")
|
|
862
|
+
console.print("š” Use: /lmodel unload - to unload the current model")
|
|
863
|
+
console.print("š” Use: /provider local - to switch to local model")
|
|
864
|
+
else:
|
|
865
|
+
console.print("š¤ No local model loaded")
|
|
866
|
+
console.print("\nUsage: /lmodel <model_path>")
|
|
867
|
+
console.print("\nExamples:")
|
|
868
|
+
console.print(" /lmodel ~/models/llama-2-7b")
|
|
869
|
+
console.print(" /lmodel /path/to/huggingface/model")
|
|
870
|
+
console.print(" /lmodel microsoft/phi-2")
|
|
871
|
+
console.print("\nš” You can use:")
|
|
872
|
+
console.print(" ⢠Local path to a downloaded model")
|
|
873
|
+
console.print(" ⢠Hugging Face model ID (will download if needed)")
|
|
874
|
+
elif parts[1] == "unload":
|
|
875
|
+
# Unload the local model
|
|
876
|
+
try:
|
|
877
|
+
ai_engine.unload_local_model()
|
|
878
|
+
console.print("ā
Local model unloaded successfully")
|
|
879
|
+
# If current provider is local, switch to default
|
|
880
|
+
if context.get('provider') == 'local':
|
|
881
|
+
available_providers = config_manager.list_providers()
|
|
882
|
+
if available_providers:
|
|
883
|
+
context['provider'] = available_providers[0]
|
|
884
|
+
console.print(f"ā
Switched to provider: {available_providers[0]}")
|
|
885
|
+
except Exception as e:
|
|
886
|
+
console.print(f"ā Error unloading model: {e}", style="red")
|
|
887
|
+
else:
|
|
888
|
+
# Load a new local model
|
|
889
|
+
model_path = " ".join(parts[1:])
|
|
890
|
+
console.print(f"š Loading local model from: {model_path}")
|
|
891
|
+
console.print("ā³ This may take a few minutes depending on model size...")
|
|
892
|
+
|
|
893
|
+
try:
|
|
894
|
+
ai_engine.load_local_model(model_path)
|
|
895
|
+
console.print("ā
Local model loaded successfully!")
|
|
896
|
+
console.print("š” Use: /provider local - to switch to the local model")
|
|
897
|
+
console.print("š” Or the model will be used automatically if 'local' is your default provider")
|
|
898
|
+
except Exception as e:
|
|
899
|
+
console.print(f"ā Error loading model: {e}", style="red")
|
|
900
|
+
console.print("\nš” Make sure you have installed the required dependencies:")
|
|
901
|
+
console.print(" pip install transformers torch accelerate")
|
|
902
|
+
return True
|
|
903
|
+
|
|
904
|
+
elif cmd == "speed":
|
|
905
|
+
config_manager = context.get('config_manager') or config_manager
|
|
906
|
+
|
|
907
|
+
if len(parts) < 2:
|
|
908
|
+
# Show current speed
|
|
909
|
+
current_speed = config_manager.get_config_value('typing_speed') or 'fast'
|
|
910
|
+
console.print(f"ā” Current typing speed: {current_speed}")
|
|
911
|
+
console.print("\nAvailable speeds:")
|
|
912
|
+
console.print(" ⢠instant - No delay (immediate)")
|
|
913
|
+
console.print(" ⢠fast - 0.001s per character (~1000 chars/sec) [default]")
|
|
914
|
+
console.print(" ⢠normal - 0.005s per character (~200 chars/sec)")
|
|
915
|
+
console.print(" ⢠slow - 0.01s per character (~100 chars/sec)")
|
|
916
|
+
console.print(" ⢠<number> - Custom delay in seconds")
|
|
917
|
+
console.print("\nUsage: /speed <instant|fast|normal|slow|number>")
|
|
918
|
+
else:
|
|
919
|
+
new_speed = parts[1]
|
|
920
|
+
|
|
921
|
+
# Validate speed
|
|
922
|
+
valid_speeds = ['instant', 'fast', 'normal', 'slow']
|
|
923
|
+
try:
|
|
924
|
+
# Try to parse as number
|
|
925
|
+
float(new_speed)
|
|
926
|
+
is_valid = True
|
|
927
|
+
except ValueError:
|
|
928
|
+
is_valid = new_speed in valid_speeds
|
|
929
|
+
|
|
930
|
+
if is_valid:
|
|
931
|
+
config_manager.set_config('typing_speed', new_speed)
|
|
932
|
+
console.print(f"ā
Typing speed set to: {new_speed}")
|
|
933
|
+
else:
|
|
934
|
+
console.print(f"ā Invalid speed: {new_speed}", style="red")
|
|
935
|
+
console.print("Valid options: instant, fast, normal, slow, or a number")
|
|
936
|
+
|
|
937
|
+
return True
|
|
938
|
+
|
|
939
|
+
elif cmd == "rules" or cmd == "rule":
|
|
940
|
+
rules_manager = RulesManager()
|
|
941
|
+
current_workspace = context.get('current_workspace')
|
|
942
|
+
|
|
943
|
+
if len(parts) < 2:
|
|
944
|
+
# Display all rules
|
|
945
|
+
rules_manager.display_rules(current_workspace)
|
|
946
|
+
console.print("\nš” Commands:")
|
|
947
|
+
console.print(" /rules add global <rule> [description]")
|
|
948
|
+
console.print(" /rules add workspace <rule> [description]")
|
|
949
|
+
console.print(" /rules remove global <index>")
|
|
950
|
+
console.print(" /rules remove workspace <index>")
|
|
951
|
+
console.print(" /rules clear global")
|
|
952
|
+
console.print(" /rules clear workspace")
|
|
953
|
+
|
|
954
|
+
elif parts[1] == "add":
|
|
955
|
+
if len(parts) < 4:
|
|
956
|
+
console.print("ā Usage: /rules add <global|workspace> <rule> [description]", style="red")
|
|
957
|
+
else:
|
|
958
|
+
rule_type = parts[2].lower()
|
|
959
|
+
# Find where description starts (after the rule text)
|
|
960
|
+
rule_parts = []
|
|
961
|
+
description_parts = []
|
|
962
|
+
in_description = False
|
|
963
|
+
|
|
964
|
+
for i, part in enumerate(parts[3:], 3):
|
|
965
|
+
if part.startswith('[') and not in_description:
|
|
966
|
+
in_description = True
|
|
967
|
+
description_parts.append(part[1:])
|
|
968
|
+
elif in_description:
|
|
969
|
+
if part.endswith(']'):
|
|
970
|
+
description_parts.append(part[:-1])
|
|
971
|
+
break
|
|
972
|
+
else:
|
|
973
|
+
description_parts.append(part)
|
|
974
|
+
else:
|
|
975
|
+
rule_parts.append(part)
|
|
976
|
+
|
|
977
|
+
rule = " ".join(rule_parts)
|
|
978
|
+
description = " ".join(description_parts) if description_parts else ""
|
|
979
|
+
|
|
980
|
+
if rule_type == "global":
|
|
981
|
+
rules_manager.add_global_rule(rule, description)
|
|
982
|
+
elif rule_type == "workspace":
|
|
983
|
+
rules_manager.add_workspace_rule(rule, description, current_workspace)
|
|
984
|
+
else:
|
|
985
|
+
console.print("ā Rule type must be 'global' or 'workspace'", style="red")
|
|
986
|
+
|
|
987
|
+
elif parts[1] == "remove":
|
|
988
|
+
if len(parts) < 4:
|
|
989
|
+
console.print("ā Usage: /rules remove <global|workspace> <index>", style="red")
|
|
990
|
+
else:
|
|
991
|
+
rule_type = parts[2].lower()
|
|
992
|
+
try:
|
|
993
|
+
index = int(parts[3])
|
|
994
|
+
if rule_type == "global":
|
|
995
|
+
rules_manager.remove_global_rule(index)
|
|
996
|
+
elif rule_type == "workspace":
|
|
997
|
+
rules_manager.remove_workspace_rule(index, current_workspace)
|
|
998
|
+
else:
|
|
999
|
+
console.print("ā Rule type must be 'global' or 'workspace'", style="red")
|
|
1000
|
+
except ValueError:
|
|
1001
|
+
console.print("ā Index must be a number", style="red")
|
|
1002
|
+
|
|
1003
|
+
elif parts[1] == "clear":
|
|
1004
|
+
if len(parts) < 3:
|
|
1005
|
+
console.print("ā Usage: /rules clear <global|workspace>", style="red")
|
|
1006
|
+
else:
|
|
1007
|
+
rule_type = parts[2].lower()
|
|
1008
|
+
if rule_type == "global":
|
|
1009
|
+
rules_manager.clear_global_rules()
|
|
1010
|
+
elif rule_type == "workspace":
|
|
1011
|
+
rules_manager.clear_workspace_rules(current_workspace)
|
|
1012
|
+
else:
|
|
1013
|
+
console.print("ā Rule type must be 'global' or 'workspace'", style="red")
|
|
1014
|
+
|
|
1015
|
+
else:
|
|
1016
|
+
console.print("ā Invalid rules command. Use /rules for help.", style="red")
|
|
1017
|
+
|
|
1018
|
+
return True
|
|
1019
|
+
|
|
1020
|
+
elif cmd == "clear":
|
|
1021
|
+
console.clear()
|
|
1022
|
+
console.print("š¬ Chat cleared")
|
|
1023
|
+
return True
|
|
1024
|
+
|
|
1025
|
+
elif cmd == "exit" or cmd == "quit":
|
|
1026
|
+
return False
|
|
1027
|
+
|
|
1028
|
+
else:
|
|
1029
|
+
console.print(f"ā Unknown command: /{cmd}. Type /help for available commands.", style="red")
|
|
1030
|
+
return True
|
|
1031
|
+
|
|
1032
|
+
def show_help():
    """Show help information.

    Builds the in-chat command reference as a Rich ``Text`` object and
    renders it inside a titled ``Panel`` on the module-level ``console``.
    Takes no arguments and returns nothing; output is purely visual.
    """
    # (text, style) pairs in display order; a None style means the
    # default console style.  Keeping them in one table makes it easy
    # to add or reorder entries without touching the rendering loop.
    entries = [
        ("Available commands:\n", "bold"),
        ("⢠/help - Show this help message\n", None),
        ("⢠/workspace <path> or /ws <path> - Change working directory\n", None),
        ("⢠/setup - Run interactive setup wizard\n", None),
        ("⢠/config [list|providers|set <key> <value>] - Manage configuration\n", None),
        ("⢠/provider [name] - Switch AI provider\n", None),
        ("⢠/model [model_id] - Switch AI model\n", None),
        ("⢠/model list or /models - Fetch available models from provider's API\n", None),
        ("⢠/lmodel <model_path> - Load local Hugging Face model\n", None),
        ("⢠/lmodel unload - Unload current local model\n", None),
        ("⢠/session [list|new|load <id>|delete <id>|export <id>] - Manage chat sessions\n", None),
        ("⢠/rules - Manage global and workspace rules for AI behavior\n", None),
        ("  - /rules add global <rule> [description] - Add a global rule\n", "dim"),
        ("  - /rules add workspace <rule> [description] - Add a workspace rule\n", "dim"),
        ("  - /rules remove global <index> - Remove a global rule\n", "dim"),
        ("  - /rules remove workspace <index> - Remove a workspace rule\n", "dim"),
        ("  - /rules clear global|workspace - Clear all rules of a type\n", "dim"),
        ("⢠/speed [instant|fast|normal|slow|<number>] - Set typing speed for AI responses\n", None),
        ("⢠/clear - Clear chat screen\n", None),
        ("⢠/exit or /quit - Exit chat session\n", None),
        ("⢠exit or quit - Exit chat session\n", None),
        ("\n", None),
        ("CLI Commands (use outside chat):\n", "bold cyan"),
        ("⢠cognautic providers - List all AI providers and their API endpoints\n", "cyan"),
        ("\n⢠Any other text will be sent to the AI\n", None),
    ]

    help_text = Text()
    for line, line_style in entries:
        if line_style is None:
            help_text.append(line)
        else:
            help_text.append(line, style=line_style)

    console.print(Panel(help_text, title="Cognautic CLI Help", style="blue"))
|
|
1062
|
+
|
|
1063
|
+
if __name__ == '__main__':
    # Support direct execution (e.g. `python -m cognautic.cli`) in addition
    # to the installed console-script entry point declared in entry_points.txt.
    main()
|