ai-coding-assistant 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_coding_assistant-0.5.0.dist-info/METADATA +226 -0
- ai_coding_assistant-0.5.0.dist-info/RECORD +89 -0
- ai_coding_assistant-0.5.0.dist-info/WHEEL +4 -0
- ai_coding_assistant-0.5.0.dist-info/entry_points.txt +3 -0
- ai_coding_assistant-0.5.0.dist-info/licenses/LICENSE +21 -0
- coding_assistant/__init__.py +3 -0
- coding_assistant/__main__.py +19 -0
- coding_assistant/cli/__init__.py +1 -0
- coding_assistant/cli/app.py +158 -0
- coding_assistant/cli/commands/__init__.py +19 -0
- coding_assistant/cli/commands/ask.py +178 -0
- coding_assistant/cli/commands/config.py +438 -0
- coding_assistant/cli/commands/diagram.py +267 -0
- coding_assistant/cli/commands/document.py +410 -0
- coding_assistant/cli/commands/explain.py +192 -0
- coding_assistant/cli/commands/fix.py +249 -0
- coding_assistant/cli/commands/index.py +162 -0
- coding_assistant/cli/commands/refactor.py +245 -0
- coding_assistant/cli/commands/search.py +182 -0
- coding_assistant/cli/commands/serve_docs.py +128 -0
- coding_assistant/cli/repl.py +381 -0
- coding_assistant/cli/theme.py +90 -0
- coding_assistant/codebase/__init__.py +1 -0
- coding_assistant/codebase/crawler.py +93 -0
- coding_assistant/codebase/parser.py +266 -0
- coding_assistant/config/__init__.py +25 -0
- coding_assistant/config/config_manager.py +615 -0
- coding_assistant/config/settings.py +82 -0
- coding_assistant/context/__init__.py +19 -0
- coding_assistant/context/chunker.py +443 -0
- coding_assistant/context/enhanced_retriever.py +322 -0
- coding_assistant/context/hybrid_search.py +311 -0
- coding_assistant/context/ranker.py +355 -0
- coding_assistant/context/retriever.py +119 -0
- coding_assistant/context/window.py +362 -0
- coding_assistant/documentation/__init__.py +23 -0
- coding_assistant/documentation/agents/__init__.py +27 -0
- coding_assistant/documentation/agents/coordinator.py +510 -0
- coding_assistant/documentation/agents/module_documenter.py +111 -0
- coding_assistant/documentation/agents/synthesizer.py +139 -0
- coding_assistant/documentation/agents/task_delegator.py +100 -0
- coding_assistant/documentation/decomposition/__init__.py +21 -0
- coding_assistant/documentation/decomposition/context_preserver.py +477 -0
- coding_assistant/documentation/decomposition/module_detector.py +302 -0
- coding_assistant/documentation/decomposition/partitioner.py +621 -0
- coding_assistant/documentation/generators/__init__.py +14 -0
- coding_assistant/documentation/generators/dataflow_generator.py +440 -0
- coding_assistant/documentation/generators/diagram_generator.py +511 -0
- coding_assistant/documentation/graph/__init__.py +13 -0
- coding_assistant/documentation/graph/dependency_builder.py +468 -0
- coding_assistant/documentation/graph/module_analyzer.py +475 -0
- coding_assistant/documentation/writers/__init__.py +11 -0
- coding_assistant/documentation/writers/markdown_writer.py +322 -0
- coding_assistant/embeddings/__init__.py +0 -0
- coding_assistant/embeddings/generator.py +89 -0
- coding_assistant/embeddings/store.py +187 -0
- coding_assistant/exceptions/__init__.py +50 -0
- coding_assistant/exceptions/base.py +110 -0
- coding_assistant/exceptions/llm.py +249 -0
- coding_assistant/exceptions/recovery.py +263 -0
- coding_assistant/exceptions/storage.py +213 -0
- coding_assistant/exceptions/validation.py +230 -0
- coding_assistant/llm/__init__.py +1 -0
- coding_assistant/llm/client.py +277 -0
- coding_assistant/llm/gemini_client.py +181 -0
- coding_assistant/llm/groq_client.py +160 -0
- coding_assistant/llm/prompts.py +98 -0
- coding_assistant/llm/together_client.py +160 -0
- coding_assistant/operations/__init__.py +13 -0
- coding_assistant/operations/differ.py +369 -0
- coding_assistant/operations/generator.py +347 -0
- coding_assistant/operations/linter.py +430 -0
- coding_assistant/operations/validator.py +406 -0
- coding_assistant/storage/__init__.py +9 -0
- coding_assistant/storage/database.py +363 -0
- coding_assistant/storage/session.py +231 -0
- coding_assistant/utils/__init__.py +31 -0
- coding_assistant/utils/cache.py +477 -0
- coding_assistant/utils/hardware.py +132 -0
- coding_assistant/utils/keystore.py +206 -0
- coding_assistant/utils/logger.py +32 -0
- coding_assistant/utils/progress.py +311 -0
- coding_assistant/validation/__init__.py +13 -0
- coding_assistant/validation/files.py +305 -0
- coding_assistant/validation/inputs.py +335 -0
- coding_assistant/validation/params.py +280 -0
- coding_assistant/validation/sanitizers.py +243 -0
- coding_assistant/vcs/__init__.py +5 -0
- coding_assistant/vcs/git.py +269 -0
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
"""Ask command - Ask questions about your codebase."""
|
|
2
|
+
|
|
3
|
+
import typer
|
|
4
|
+
from rich.console import Console
|
|
5
|
+
from rich.markdown import Markdown
|
|
6
|
+
from rich.panel import Panel
|
|
7
|
+
from rich.syntax import Syntax
|
|
8
|
+
from rich.live import Live
|
|
9
|
+
from rich.spinner import Spinner
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
from coding_assistant.config.settings import settings
|
|
13
|
+
from coding_assistant.context.enhanced_retriever import EnhancedSemanticRetriever
|
|
14
|
+
from coding_assistant.codebase.crawler import CodebaseCrawler
|
|
15
|
+
from coding_assistant.llm.client import LLMClientFactory
|
|
16
|
+
from coding_assistant.llm.prompts import PromptBuilder
|
|
17
|
+
from coding_assistant.cli.theme import get_console, COLORS, ICONS
|
|
18
|
+
|
|
19
|
+
console = get_console()
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def ask_command(
    question: str = typer.Argument(..., help="Your question about the codebase"),
    context_chunks: int = typer.Option(
        5,
        "--context",
        "-c",
        help="Number of code chunks to include as context"
    ),
    use_hybrid: bool = typer.Option(
        True,
        "--hybrid/--no-hybrid",
        help="Use hybrid search (vector + keyword)"
    ),
):
    """Ask a question about your codebase with intelligent context retrieval.

    The assistant will:
    1. Find relevant code using hybrid search (semantic + keyword)
    2. Rank results with language-aware intelligence
    3. Build context with smart chunking
    4. Generate answer using LLM

    Examples:
        assistant ask "how does authentication work?"
        assistant ask "where are database queries defined?" --context 10
        assistant ask "explain the JWT implementation"

    Exits with code 1 when no context could be gathered or when LLM
    generation fails.
    """

    # Display question in a beautiful panel
    question_panel = Panel(
        f"[question]{question}[/question]",
        title=f"[primary]{ICONS['question']} Question[/primary]",
        border_style="primary",
        padding=(0, 2)
    )
    console.print(question_panel)

    # Accumulates dicts of {'path', 'content', 'language'} passed to the
    # prompt builder, regardless of which retrieval path filled it.
    file_contents = []

    # Try enhanced semantic retrieval; any failure falls through to the
    # simple file-scan path below.
    try:
        with console.status(f"[spinner]{ICONS['search']} Finding relevant code...[/spinner]", spinner="dots"):
            retriever = EnhancedSemanticRetriever(settings.project_path)

            # Check if indexed; an empty index means 'assistant index'
            # has not been run yet.
            stats = retriever.get_stats()
            if stats['total_chunks'] == 0:
                console.print(f"[warning]{ICONS['warning']} Codebase not indexed.[/warning]")
                console.print("[dim] Run 'assistant index' first for best results.[/dim]")
                console.print("[dim] Falling back to simple file scan...[/dim]\n")
                use_semantic = False
            else:
                # Retrieve with enhanced retrieval
                results = retriever.retrieve(
                    query=question,
                    k=context_chunks,
                    use_hybrid=use_hybrid,
                    use_ranking=True
                )

                if settings.verbose:
                    console.print(f"\n[dim]Retrieved {len(results)} relevant chunks:[/dim]")
                    for i, r in enumerate(results, 1):
                        # Hybrid results carry a rank score, plain vector
                        # results a similarity score.
                        score_type = "Rank" if use_hybrid else "Similarity"
                        score = r.get('rank_score', r.get('similarity', 0.0))
                        console.print(
                            f"[dim] {i}. {r['path']}:{r['start_line']} - "
                            f"{r['name']} ({score_type}: {score:.2f})[/dim]"
                        )
                    console.print()

                # Convert retrieval results to the prompt-builder format.
                for result in results:
                    file_contents.append({
                        'path': f"{result['path']}:{result['start_line']}-{result['end_line']}",
                        'content': result['content'],
                        'language': result.get('language', 'python')
                    })

                use_semantic = True

    except Exception as e:
        console.print(f"[yellow]⚠️  Enhanced retrieval failed: {e}[/yellow]")
        console.print("[yellow]   Falling back to simple file scan...[/yellow]\n")
        use_semantic = False

    # Fallback to simple scan if needed
    if not use_semantic:
        with console.status("[bold green]Scanning codebase..."):
            crawler = CodebaseCrawler(settings.project_path)
            files = crawler.scan(max_files=context_chunks * 2)

            for file_info in files[:context_chunks]:
                try:
                    content = crawler.read_file(file_info['path'])
                    file_contents.append({
                        'path': file_info['path'],
                        'content': content[:2000],  # Limit content
                        'language': file_info['extension'][1:] if file_info['extension'] else 'text'
                    })
                except Exception:
                    # BUGFIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit. Unreadable files are
                    # still skipped best-effort.
                    continue

        if settings.verbose:
            console.print(f"[dim]Scanned {len(files)} files, using {len(file_contents)} for context[/dim]\n")

    if not file_contents:
        console.print("[red]✗ No code found to provide context.[/red]")
        console.print("[yellow]Try indexing first: assistant index[/yellow]\n")
        raise typer.Exit(1)

    # Build prompt
    prompt_builder = PromptBuilder()
    messages = prompt_builder.build_ask_prompt(question, file_contents)

    # Get LLM client
    llm = LLMClientFactory.create_client(settings.llm_provider)

    # Generate response with beautiful formatting
    console.print()  # Spacing
    console.print(Panel(
        f"[provider]{ICONS['sparkles']} Generating answer...[/provider]",
        border_style="secondary",
        padding=(0, 2)
    ))

    try:
        # Stream and accumulate the full answer before rendering, so the
        # markdown is parsed once over the complete text.
        response_text = ""
        for chunk in llm.generate(messages, stream=True):
            response_text += chunk

        # Render markdown answer in a panel
        answer_md = Markdown(response_text)
        answer_panel = Panel(
            answer_md,
            title=f"[success]{ICONS['answer']} Answer[/success]",
            border_style="success",
            padding=(1, 2)
        )
        console.print(answer_panel)

        # Show tips
        if not use_semantic:
            console.print(f"\n[dim]{ICONS['info']} Tip: Run 'assistant index' for better context retrieval[/dim]")

    except Exception as e:
        error_panel = Panel(
            f"[error]{str(e)}[/error]",
            title=f"[error]{ICONS['error']} Error[/error]",
            border_style="error",
            padding=(0, 2)
        )
        console.print(error_panel)
        if settings.verbose:
            import traceback
            traceback.print_exc()
        raise typer.Exit(1)
|
|
@@ -0,0 +1,438 @@
|
|
|
1
|
+
"""Configuration management commands."""
|
|
2
|
+
|
|
3
|
+
import typer
|
|
4
|
+
from rich.console import Console
|
|
5
|
+
from rich.table import Table
|
|
6
|
+
from rich.panel import Panel
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
from coding_assistant.config.settings import settings
|
|
11
|
+
from coding_assistant.config.config_manager import Config
|
|
12
|
+
from coding_assistant.utils.keystore import KeyStore
|
|
13
|
+
from coding_assistant.utils.hardware import HardwareDetector
|
|
14
|
+
|
|
15
|
+
console = Console()
|
|
16
|
+
app = typer.Typer(help="Manage configuration and API keys")
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@app.command(name="show")
def show_config():
    """Display current configuration."""
    console.print("\n[bold cyan]Current Configuration[/bold cyan]\n")

    # LLM provider settings, rendered as a two-column table.
    llm_table = Table(title="LLM Provider Settings", show_header=True, header_style="bold magenta")
    llm_table.add_column("Setting", style="cyan")
    llm_table.add_column("Value", style="green")
    for label, value in (
        ("Provider", settings.llm_provider or "[dim]auto-detect[/dim]"),
        ("Ollama Model", settings.ollama_model),
        ("Ollama URL", settings.ollama_base_url),
        ("Groq Model", settings.groq_model),
        ("Together Model", settings.together_model),
    ):
        llm_table.add_row(label, value)

    console.print(llm_table)
    console.print()

    # API key status: the keystore wins; otherwise fall back to any
    # `<provider>_api_key` attribute on settings (environment variable).
    keystore = KeyStore()
    stored = keystore.get_masked_keys()

    keys_table = Table(title="API Keys Status", show_header=True, header_style="bold magenta")
    keys_table.add_column("Provider", style="cyan")
    keys_table.add_column("Status", style="green")
    keys_table.add_column("Key Preview", style="dim")

    for provider in ["groq", "gemini", "together", "openai", "claude"]:
        if provider in stored:
            status, preview = "✓ Configured", stored[provider]
        else:
            env_key = getattr(settings, f"{provider}_api_key", None)
            if env_key:
                status, preview = "✓ From env var", keystore.mask_key(env_key)
            else:
                status, preview = "✗ Not set", "-"
        keys_table.add_row(provider.capitalize(), status, preview)

    console.print(keys_table)
    console.print()

    # General (non-provider) settings.
    general_table = Table(title="General Settings", show_header=True, header_style="bold magenta")
    general_table.add_column("Setting", style="cyan")
    general_table.add_column("Value", style="green")
    for label, value in (
        ("Project Path", str(settings.project_path)),
        ("Data Directory", str(settings.data_dir)),
        ("Verbose", str(settings.verbose)),
        ("Max Context Files", str(settings.max_context_files)),
    ):
        general_table.add_row(label, value)

    console.print(general_table)
    console.print()
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
@app.command(name="set-api-key")
def set_api_key(
    provider: str = typer.Argument(..., help="Provider name (groq, gemini, together, openai, claude)"),
    api_key: str = typer.Argument(..., help="API key to store"),
):
    """Store API key securely for a provider.

    The key is persisted via KeyStore; only a masked preview is echoed
    back so the raw secret never hits the terminal again.

    Exits with code 1 if the provider name is rejected or storage fails.
    """
    try:
        keystore = KeyStore()
        keystore.set_key(provider, api_key)

        console.print(f"\n[green]✓ API key for {provider} stored successfully![/green]\n")

        # Show masked key -- never echo the raw secret.
        masked = keystore.mask_key(api_key)
        console.print(f"[dim]Stored key: {masked}[/dim]\n")

        # Next steps
        console.print("[bold]Next steps:[/bold]")
        console.print(" 1. Test the connection: assistant ask \"test question\"")
        console.print(" 2. View config: assistant config show")
        console.print()

    except ValueError as e:
        # KeyStore raises ValueError for unknown provider names.
        console.print(f"\n[red]✗ Error: {e}[/red]\n")
        # CONSISTENCY FIX: include gemini, matching the provider list used
        # by `config show` and `config models`.
        console.print("[yellow]Valid providers: groq, gemini, together, openai, claude[/yellow]\n")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"\n[red]✗ Failed to store API key: {e}[/red]\n")
        raise typer.Exit(1)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
@app.command(name="remove-api-key")
def remove_api_key(
    provider: str = typer.Argument(..., help="Provider name to remove key for"),
):
    """Remove stored API key for a provider."""
    # KeyStore.remove_key reports whether anything was actually deleted.
    removed = KeyStore().remove_key(provider)
    if removed:
        console.print(f"\n[green]✓ API key for {provider} removed successfully![/green]\n")
    else:
        console.print(f"\n[yellow]⚠ No API key found for {provider}[/yellow]\n")
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
@app.command(name="list-keys")
def list_keys():
    """Show all configured providers with masked keys."""
    console.print("\n[bold cyan]Configured API Keys[/bold cyan]\n")

    store = KeyStore()
    configured = store.list_providers()

    # Nothing stored yet: point the user at the setup command and bail.
    if not configured:
        console.print("[yellow]No API keys configured yet.[/yellow]\n")
        console.print("[dim]Set a key with: assistant config set-api-key <provider> <key>[/dim]\n")
        return

    key_table = Table(show_header=True, header_style="bold magenta")
    key_table.add_column("#", style="dim", width=4)
    key_table.add_column("Provider", style="cyan")
    key_table.add_column("Key Preview", style="green")

    previews = store.get_masked_keys()
    for index, name in enumerate(configured, 1):
        # Fall back to a generic mask if the preview is unavailable.
        key_table.add_row(str(index), name.capitalize(), previews.get(name, "***"))

    console.print(key_table)
    console.print()

    console.print("[dim]Remove a key with: assistant config remove-api-key <provider>[/dim]\n")
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
@app.command(name="check-hardware")
def check_hardware():
    """Display hardware information and provider recommendations."""
    console.print("\n[bold cyan]Hardware Detection & Recommendations[/bold cyan]\n")

    info = HardwareDetector.get_hardware_info()

    # Summary table of the detected specs.
    spec_table = Table(show_header=True, header_style="bold magenta")
    spec_table.add_column("Specification", style="cyan")
    spec_table.add_column("Value", style="green")
    for label, value in (
        ("Total RAM", f"{info.total_ram_gb:.1f} GB"),
        ("Available RAM", f"{info.available_ram_gb:.1f} GB"),
        ("Can Run Local Models", "Yes" if info.can_run_local else "No"),
        ("Recommended Provider", info.recommended_provider.upper()),
        ("Recommended Model", info.recommended_model),
    ):
        spec_table.add_row(label, value)

    console.print(spec_table)
    console.print()

    # Human-readable recommendation; green border when local models fit.
    recommendation_panel = Panel(
        HardwareDetector.format_recommendation(info),
        title="[bold]Setup Recommendation[/bold]",
        border_style="green" if info.can_run_local else "yellow"
    )
    console.print(recommendation_panel)
    console.print()
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
@app.command(name="models")
def select_model():
    """Interactively select and switch LLM provider/model.

    Lists every known provider with a readiness status (key configured,
    service reachable), prompts for a selection, and persists the choice
    to `.assistant.yml`. Falls back to printing environment-variable
    instructions if the config file cannot be written.
    """
    from rich.prompt import Prompt
    from coding_assistant.llm.client import OllamaClient
    from coding_assistant.llm.groq_client import GroqClient
    from coding_assistant.llm.together_client import TogetherClient
    from coding_assistant.llm.gemini_client import GeminiClient

    console.print("\n[bold cyan]Available LLM Models[/bold cyan]\n")

    keystore = KeyStore()

    # Static catalog of supported providers and their display metadata.
    provider_info = [
        {
            "name": "together",
            "display": "Together AI",
            "model": "Qwen2.5-Coder-32B",
            "description": "Best for coding (32B params)",
            "client_class": TogetherClient,
            "needs_key": True,
            "color": "cyan"
        },
        {
            "name": "groq",
            "display": "Groq",
            "model": "Llama 3.3 70B",
            "description": "Fast & capable general model",
            "client_class": GroqClient,
            "needs_key": True,
            "color": "green"
        },
        {
            "name": "gemini",
            "display": "Google Gemini",
            "model": "Gemini 2.0 Flash",
            "description": "Fast, may have quotas",
            "client_class": GeminiClient,
            "needs_key": True,
            "color": "yellow"
        },
        {
            "name": "ollama",
            "display": "Ollama (Local)",
            "model": settings.ollama_model,
            "description": "Privacy-first, runs offline",
            "client_class": OllamaClient,
            "needs_key": False,
            "color": "magenta"
        },
    ]

    # Create table
    table = Table(show_header=True, header_style="bold magenta", title="LLM Providers")
    table.add_column("#", style="dim", width=4)
    table.add_column("Provider", style="bold")
    table.add_column("Model", style="cyan")
    table.add_column("Description", style="dim")
    table.add_column("Status", justify="center")

    available_choices = []

    # The configured default is loop-invariant; compute it once.
    current_provider = settings.llm_provider or "auto"

    for i, provider in enumerate(provider_info, 1):
        # A provider counts as configured when the keystore or an
        # environment-backed settings attribute holds a key.
        if provider["needs_key"]:
            has_key = (
                keystore.get_key(provider["name"]) is not None or
                getattr(settings, f"{provider['name']}_api_key", None) is not None
            )
        else:
            has_key = True

        # Probe actual availability (service running / API reachable).
        is_available = False
        if has_key:
            try:
                if provider["name"] == "ollama":
                    client = provider["client_class"](
                        model=settings.ollama_model,
                        base_url=settings.ollama_base_url
                    )
                elif provider["name"] == "groq":
                    client = provider["client_class"](
                        api_key=settings.groq_api_key,
                        model=settings.groq_model
                    )
                elif provider["name"] == "gemini":
                    client = provider["client_class"](
                        api_key=settings.gemini_api_key,
                        model=settings.gemini_model
                    )
                elif provider["name"] == "together":
                    client = provider["client_class"](
                        api_key=settings.together_api_key,
                        model=settings.together_model
                    )

                is_available = client.is_available()
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit during the probe.
                is_available = False

        # Determine status
        if is_available:
            status = "[green]✓ Ready[/green]"
            available_choices.append(str(i))
        elif has_key:
            status = "[yellow]⚠ Offline[/yellow]"
        else:
            status = "[red]✗ No API Key[/red]"

        # Mark the current default with an arrow.
        provider_display = provider["display"]
        if current_provider == provider["name"]:
            provider_display = f"→ {provider_display}"

        table.add_row(
            str(i),
            provider_display,
            provider["model"],
            provider["description"],
            status
        )

    console.print(table)
    console.print()

    if not available_choices:
        console.print("[yellow]⚠ No providers are currently available.[/yellow]\n")
        console.print("[bold]To set up a provider:[/bold]")
        console.print(" 1. Get an API key (Groq is FREE: https://console.groq.com)")
        console.print(" 2. Set it: assistant config set-api-key groq <your-key>")
        console.print(" 3. Or install Ollama: curl -fsSL https://ollama.com/install.sh | sh\n")
        return

    # Prompt for selection; 'q' leaves the configuration untouched.
    console.print(f"[dim]Current: {settings.llm_provider or 'auto-detect'}[/dim]")
    choice = Prompt.ask(
        "\n[bold]Select provider[/bold]",
        choices=available_choices + ["q"],
        default="q"
    )

    if choice == "q":
        console.print("\n[dim]No changes made.[/dim]\n")
        return

    # Get selected provider
    selected = provider_info[int(choice) - 1]

    # Persist the choice to the project-local config file.
    config_file = Path(".assistant.yml")
    try:
        if config_file.exists():
            config = Config.from_file(config_file)
        else:
            config = Config.default()

        config.llm.provider = selected["name"]
        config.save(config_file)

        console.print(f"\n[green]✓ Default provider set to: {selected['display']}[/green]")
        console.print(f"[dim]Saved to: {config_file}[/dim]\n")

        console.print("[bold]Test it:[/bold]")
        console.print(f" assistant ask \"test question\"\n")

    except Exception as e:
        # Config write failed (permissions, read-only FS, ...); offer the
        # environment-variable route instead of aborting.
        console.print(f"\n[yellow]⚠ Could not save to config file: {e}[/yellow]")
        console.print(f"\n[bold]Alternative: Set environment variable[/bold]")
        console.print(f" export LLM_PROVIDER={selected['name']}")
        console.print(f" # Or add to ~/.bashrc:\n")
        console.print(f" echo 'export LLM_PROVIDER={selected['name']}' >> ~/.bashrc\n")
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
@app.command(name="generate")
def generate_config(
    output: Optional[Path] = typer.Option(
        None,
        "--output",
        "-o",
        help="Output file path (default: .assistant.yml)"
    ),
    force: bool = typer.Option(
        False,
        "--force",
        "-f",
        help="Overwrite existing config file"
    ),
):
    """Generate default configuration file."""
    target = output if output is not None else Path(".assistant.yml")

    # Refuse to clobber an existing file unless --force was given.
    if target.exists() and not force:
        console.print(f"\n[yellow]⚠ Config file already exists: {target}[/yellow]")
        console.print("[dim]Use --force to overwrite[/dim]\n")
        raise typer.Exit(1)

    try:
        # Write a default Config out to the chosen path.
        Config().save(target)

        console.print(f"\n[green]✓ Configuration file generated: {target}[/green]\n")

        console.print("[bold]Next steps:[/bold]")
        console.print(f" 1. Edit the file: {target}")
        console.print(" 2. Set your API keys: assistant config set-api-key <provider> <key>")
        console.print(" 3. Check hardware: assistant config check-hardware")
        console.print()

    except Exception as e:
        console.print(f"\n[red]✗ Failed to generate config: {e}[/red]\n")
        raise typer.Exit(1)
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
def config_command(
    ctx: typer.Context,
):
    """
    Manage configuration and API keys.

    Subcommands:
    - show: Display current configuration
    - models: Interactively select LLM provider
    - set-api-key: Store API key for a provider
    - remove-api-key: Remove stored API key
    - list-keys: Show all configured providers
    - check-hardware: Display hardware recommendations
    - generate: Generate default config file

    Examples:
        assistant config show
        assistant config models
        assistant config set-api-key groq gsk_xxxxx
        assistant config check-hardware
        assistant config list-keys
    """
    # This function serves as the entry point
    # Actual command handling is done by subcommands via app
    # NOTE(review): this body is never executed -- the module-level
    # statement `config_command = app` later in this file rebinds the
    # name to the Typer sub-app, so importers receive `app`, not this
    # function. It is kept only as human-readable usage documentation.
    pass
|
|
435
|
+
|
|
436
|
+
|
|
437
|
+
# Export the Typer app as the command. This rebinds `config_command`
# (shadowing the placeholder function defined above), so code that
# imports `config_command` from this module gets the full Typer
# sub-command group with show/models/set-api-key/etc.
config_command = app
|