parishad 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parishad/__init__.py +70 -0
- parishad/__main__.py +10 -0
- parishad/checker/__init__.py +25 -0
- parishad/checker/deterministic.py +644 -0
- parishad/checker/ensemble.py +496 -0
- parishad/checker/retrieval.py +546 -0
- parishad/cli/__init__.py +6 -0
- parishad/cli/code.py +3254 -0
- parishad/cli/main.py +1158 -0
- parishad/cli/prarambh.py +99 -0
- parishad/cli/sthapana.py +368 -0
- parishad/config/modes.py +139 -0
- parishad/config/pipeline.core.yaml +128 -0
- parishad/config/pipeline.extended.yaml +172 -0
- parishad/config/pipeline.fast.yaml +89 -0
- parishad/config/user_config.py +115 -0
- parishad/data/catalog.py +118 -0
- parishad/data/models.json +108 -0
- parishad/memory/__init__.py +79 -0
- parishad/models/__init__.py +181 -0
- parishad/models/backends/__init__.py +247 -0
- parishad/models/backends/base.py +211 -0
- parishad/models/backends/huggingface.py +318 -0
- parishad/models/backends/llama_cpp.py +239 -0
- parishad/models/backends/mlx_lm.py +141 -0
- parishad/models/backends/ollama.py +253 -0
- parishad/models/backends/openai_api.py +193 -0
- parishad/models/backends/transformers_hf.py +198 -0
- parishad/models/costs.py +385 -0
- parishad/models/downloader.py +1557 -0
- parishad/models/optimizations.py +871 -0
- parishad/models/profiles.py +610 -0
- parishad/models/reliability.py +876 -0
- parishad/models/runner.py +651 -0
- parishad/models/tokenization.py +287 -0
- parishad/orchestrator/__init__.py +24 -0
- parishad/orchestrator/config_loader.py +210 -0
- parishad/orchestrator/engine.py +1113 -0
- parishad/orchestrator/exceptions.py +14 -0
- parishad/roles/__init__.py +71 -0
- parishad/roles/base.py +712 -0
- parishad/roles/dandadhyaksha.py +163 -0
- parishad/roles/darbari.py +246 -0
- parishad/roles/majumdar.py +274 -0
- parishad/roles/pantapradhan.py +150 -0
- parishad/roles/prerak.py +357 -0
- parishad/roles/raja.py +345 -0
- parishad/roles/sacheev.py +203 -0
- parishad/roles/sainik.py +427 -0
- parishad/roles/sar_senapati.py +164 -0
- parishad/roles/vidushak.py +69 -0
- parishad/tools/__init__.py +7 -0
- parishad/tools/base.py +57 -0
- parishad/tools/fs.py +110 -0
- parishad/tools/perception.py +96 -0
- parishad/tools/retrieval.py +74 -0
- parishad/tools/shell.py +103 -0
- parishad/utils/__init__.py +7 -0
- parishad/utils/hardware.py +122 -0
- parishad/utils/logging.py +79 -0
- parishad/utils/scanner.py +164 -0
- parishad/utils/text.py +61 -0
- parishad/utils/tracing.py +133 -0
- parishad-0.1.0.dist-info/METADATA +256 -0
- parishad-0.1.0.dist-info/RECORD +68 -0
- parishad-0.1.0.dist-info/WHEEL +4 -0
- parishad-0.1.0.dist-info/entry_points.txt +2 -0
- parishad-0.1.0.dist-info/licenses/LICENSE +21 -0
parishad/cli/code.py
ADDED
@@ -0,0 +1,3254 @@
"""
Parishad CLI - Unified TUI with setup wizard and chat interface.

Features:
- Setup wizard on first run (Sabha selection, model browser)
- Interactive chat with agentic coding assistant
- Advanced input (@mentions, /commands, ? help)
"""

from __future__ import annotations

import json
import os
import re
import sys
import subprocess
import socket
import shutil
from pathlib import Path
from typing import Optional, Dict, List, Tuple

from dataclasses import dataclass, field

from textual.app import App, ComposeResult
from textual.message import Message
from textual.containers import Container, Vertical, Horizontal, Grid, ScrollableContainer
from textual.widgets import (
    Button, Footer, Header, Input, Label, ListItem,
    ListView, Select, Static, TabbedContent, TabPane,
    ProgressBar, RichLog
)
from textual.suggester import Suggester
from textual.binding import Binding
from textual.screen import Screen
from textual import on
from rich.text import Text
from rich.panel import Panel

# =============================================================================
# Configuration - Robust path resolution with fallbacks
# =============================================================================

def _get_config_dir() -> Path:
    """
    Get config directory - always uses ~/.parishad for consistency.
    This is the single source of truth for Parishad configuration.
    """
    return Path.home() / ".parishad"

# Define config constants
CONFIG_DIR = _get_config_dir()
CONFIG_FILE = CONFIG_DIR / "config.json"

def load_parishad_config() -> Optional[ParishadConfig]:
    """
    Load Parishad configuration from disk.

    Returns:
        ParishadConfig if valid config exists, None otherwise
    """
    try:
        if not CONFIG_FILE.exists():
            return None

        with open(CONFIG_FILE, 'r', encoding='utf-8') as f:
            data = json.load(f)

        # New structure: session data is under 'session' key
        # Old structure: session data is at root level
        session_data = data.get("session", data)

        return ParishadConfig.from_dict(session_data, full_config=data)

    except Exception:
        # Invalid or unreadable config (e.g. JSONDecodeError, KeyError):
        # treat as no config
        return None

@dataclass
class ParishadConfig:
    """Central configuration for Parishad TUI."""
    sabha: Optional[str] = None  # "laghu" | "madhyam" | "maha"
    backend: Optional[str] = None  # "ollama" | "huggingface" | "lmstudio"
    model: Optional[str] = None  # model id/name
    cwd: str = ""  # working directory (optional)
    setup_complete: bool = False

    # Multi-model assignment mapping (slot -> model_id)
    model_map: Dict[str, str] = field(default_factory=dict)

    # Store other fields to preserve them (e.g., system, models, permissions)
    extra_fields: Dict = field(default_factory=dict)

    @classmethod
    def from_dict(cls, session_data: Dict, full_config: Optional[Dict] = None) -> "ParishadConfig":
        """Create config from dictionary."""
        # Store full config for preservation
        extra = full_config if full_config else {}

        return cls(
            sabha=session_data.get("sabha"),
            backend=session_data.get("backend"),
            model=session_data.get("model"),
            cwd=session_data.get("cwd", ""),
            setup_complete=session_data.get("setup_complete", False),
            model_map=session_data.get("model_map", {}),
            extra_fields=extra
        )

    def to_dict(self) -> Dict:
        """Convert config to dictionary for JSON serialization."""
        # Start with preserved fields
        result = dict(self.extra_fields) if self.extra_fields else {}

        # Update top-level flags
        result["setup_complete"] = True

        # Update session data
        result["session"] = {
            "sabha": self.sabha,
            "backend": self.backend,
            "model": self.model,
            "cwd": self.cwd,
            "model_map": self.model_map
        }

        return result

    def get_mode(self) -> str:
        """Get mode name from sabha using modes.py mapping."""
        from ..config.modes import SABHA_ID_TO_MODE
        return SABHA_ID_TO_MODE.get(self.sabha, "fast")

    def get_pipeline_config(self) -> str:
        """Get pipeline config name for engine initialization."""
        from ..config.modes import get_pipeline_name
        return get_pipeline_name(self.sabha)

def save_parishad_config(config: ParishadConfig) -> bool:
    """
    Save Parishad configuration to disk atomically.

    Uses atomic write pattern: write to .tmp file, then rename.
    This prevents corruption if process is interrupted.

    Args:
        config: Configuration to save

    Returns:
        True if save successful, False otherwise
    """
    try:
        # DEBUG LOGGING
        db_path = Path.home() / "parishad_debug.log"
        with open(db_path, "a") as f:
            f.write(f"DEBUG: Attempting to save config to {CONFIG_FILE}\n")

        # Ensure config directory exists
        CONFIG_DIR.mkdir(parents=True, exist_ok=True)

        # Read existing file to get latest state of other fields (models, system, etc.)
        # This prevents overwriting updates from other components (like ModelManager)
        current_data = {}
        if CONFIG_FILE.exists():
            try:
                with open(CONFIG_FILE, 'r', encoding='utf-8') as f:
                    current_data = json.load(f)
            except Exception:
                # If read fails, fall back to what we have in config.extra_fields
                current_data = dict(config.extra_fields) if config.extra_fields else {}
        else:
            current_data = dict(config.extra_fields) if config.extra_fields else {}

        # Update session data managed by this config object
        # We explicitly update only what we own
        current_data["session"] = {
            "sabha": config.sabha,
            "backend": config.backend,
            "model": config.model,
            "cwd": config.cwd,
            "model_map": config.model_map
        }
        current_data["setup_complete"] = True

        # Write to temporary file first
        tmp_file = CONFIG_FILE.with_suffix(".json.tmp")

        with open(tmp_file, 'w', encoding='utf-8') as f:
            json.dump(current_data, f, indent=2)

        # Atomic rename (overwrites existing file)
        tmp_file.replace(CONFIG_FILE)

        with open(db_path, "a") as f:
            f.write(f"DEBUG: Config saved successfully to {CONFIG_FILE}\n")

        return True

    except Exception as e:
        # Log the error
        db_path = Path.home() / "parishad_debug.log"
        with open(db_path, "a") as f:
            f.write(f"DEBUG: save_parishad_config FAILED: {e}\n")
            import traceback
            f.write(traceback.format_exc())

        # Clean up temp file if it exists
        try:
            tmp_file = CONFIG_FILE.with_suffix(".json.tmp")
            if tmp_file.exists():
                tmp_file.unlink()
        except Exception:
            pass

        return False

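# For reference, a config.json produced by save_parishad_config() looks roughly
# like the sketch below (values are illustrative; any extra top-level keys such
# as "models" or "permissions" written by other components are preserved
# verbatim via extra_fields):
#
#   {
#     "setup_complete": true,
#     "session": {
#       "sabha": "laghu",
#       "backend": "ollama",
#       "model": "llama3.2:3b",
#       "cwd": "/path/to/project",
#       "model_map": {"single": "llama3.2:3b"}
#     }
#   }
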
# =============================================================================
# Input Parsing Layer - Task 2
# =============================================================================

@dataclass
class LoadedFile:
    """Represents a loaded file with its content."""
    path: str
    exists: bool
    content: Optional[str] = None
    error: Optional[str] = None
    size_bytes: int = 0


@dataclass
class ParsedInput:
    """
    Structured representation of user input after parsing.

    Attributes:
        raw: Original input string
        is_command: True if input starts with /
        command_name: Command name without / (e.g., "help", "exit")
        command_args: List of command arguments
        tools: List of file references (e.g., [{"type": "file", "path": "foo.py"}])
        flags: Dict of boolean flags (e.g., {"idk": True, "safe": False})
        user_query: Natural language part with @ and # tokens removed
    """
    raw: str
    is_command: bool = False
    command_name: Optional[str] = None
    command_args: List[str] = field(default_factory=list)
    tools: List[Dict[str, str]] = field(default_factory=list)
    flags: Dict[str, bool] = field(default_factory=dict)
    user_query: str = ""

def parse_input(raw: str) -> ParsedInput:
    """
    Parse user input into structured format.

    Handles:
    - Slash commands: /help, /exit, /clear, /config, etc.
    - File references: @path/to/file.py
    - Flags: #idk, #safe, #noguess

    Args:
        raw: Raw input string from user

    Returns:
        ParsedInput with parsed components
    """
    raw_stripped = raw.strip()

    # Empty input
    if not raw_stripped:
        return ParsedInput(raw=raw, user_query="")

    # Command detection (starts with /)
    if raw_stripped.startswith("/"):
        parts = raw_stripped.split(maxsplit=1)
        cmd_name = parts[0][1:].lower()  # Remove / and lowercase
        cmd_args = parts[1].split() if len(parts) > 1 else []

        return ParsedInput(
            raw=raw,
            is_command=True,
            command_name=cmd_name,
            command_args=cmd_args
        )

    # Not a command - parse tools and flags
    tools = []
    flags = {}

    # Pattern for @file references
    # Matches: @filename.ext, @path/to/file.ext, @"path with spaces.txt"
    file_pattern = r'@(?:"([^"]+)"|([^\s]+))'

    for match in re.finditer(file_pattern, raw_stripped):
        # Group 1 is quoted path, group 2 is unquoted path
        file_path = match.group(1) if match.group(1) else match.group(2)
        tools.append({
            "type": "file",
            "path": file_path
        })

    # Remove @file references from query
    query_without_files = re.sub(file_pattern, '', raw_stripped)

    # Pattern for flags: #idk, #safe, #noguess
    flag_pattern = r'#(idk|safe|noguess|careful)\b'

    for match in re.finditer(flag_pattern, query_without_files, re.IGNORECASE):
        flag_name = match.group(1).lower()
        flags[flag_name] = True

    # Remove flags from query
    user_query = re.sub(flag_pattern, '', query_without_files, flags=re.IGNORECASE)

    # Clean up extra whitespace
    user_query = ' '.join(user_query.split())

    return ParsedInput(
        raw=raw,
        is_command=False,
        tools=tools,
        flags=flags,
        user_query=user_query
    )

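# Worked example (illustrative inputs; file names are hypothetical):
#   parse_input('explain @"my file.py" @utils.py #idk')
# yields a ParsedInput with
#   is_command  = False
#   tools       = [{"type": "file", "path": "my file.py"},
#                  {"type": "file", "path": "utils.py"}]
#   flags       = {"idk": True}
#   user_query  = "explain"
# while parse_input("/config set sabha laghu") yields
#   is_command=True, command_name="config", command_args=["set", "sabha", "laghu"].
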
def load_file(file_path: str, base_dir: Path, max_size_kb: int = 1024) -> LoadedFile:
    """
    Load a file with error handling and size limits.

    Args:
        file_path: Path to file (relative or absolute)
        base_dir: Base directory for resolving relative paths
        max_size_kb: Maximum file size in KB (default 1MB)

    Returns:
        LoadedFile with content or error information
    """
    try:
        # Resolve path
        path_obj = Path(file_path)
        if not path_obj.is_absolute():
            path_obj = base_dir / path_obj

        path_obj = path_obj.resolve()

        # Check existence
        if not path_obj.exists():
            return LoadedFile(
                path=file_path,
                exists=False,
                error=f"File not found: {file_path}"
            )

        if path_obj.is_dir():
            # Handle directories by listing content (simulating 'ls' or 'tree')
            try:
                # Simple shallow listing with file types for now. Could be
                # enhanced with a Tool run if a full `ls -R` is needed.
                items = []
                for item in sorted(path_obj.iterdir()):
                    prefix = "[DIR]" if item.is_dir() else "[FILE]"
                    size = f"{item.stat().st_size}b" if item.is_file() else ""
                    items.append(f"{prefix} {item.name} {size}")

                dir_content = f"Directory Listing for {file_path}:\n" + "\n".join(items)

                return LoadedFile(
                    path=file_path,
                    exists=True,
                    content=dir_content,
                    size_bytes=len(dir_content.encode('utf-8'))
                )
            except Exception as e:
                return LoadedFile(
                    path=file_path,
                    exists=True,
                    error=f"Error listing directory: {e}"
                )

        if not path_obj.is_file():
            return LoadedFile(
                path=file_path,
                exists=False,
                error=f"Not a file or directory: {file_path}"
            )

        # Check for binary/image extensions to prevent crash
        suffix = path_obj.suffix.lower()
        binary_exts = {
            ".png", ".jpg", ".jpeg", ".gif", ".webp", ".bmp", ".tiff", ".ico",  # Images
            ".pdf", ".zip", ".tar", ".gz", ".7z", ".rar",  # Archives
            ".exe", ".bin", ".dll", ".so", ".dylib",  # Binaries
            ".pyc", ".pkl", ".db", ".sqlite"  # Data
        }

        # Calculate max bytes early
        max_bytes = max_size_kb * 1024

        if suffix in binary_exts:
            try:
                from ..tools.perception import PerceptionTool
                # Attempt conversion (use default local config for CLI view)
                pt = PerceptionTool()
                result = pt.run(str(path_obj))

                if result.success:
                    content_preview = result.data
                    # Truncate if too huge
                    if len(content_preview) > max_bytes:
                        content_preview = content_preview[:max_bytes] + "... [Truncated]"

                    return LoadedFile(
                        path=file_path,
                        exists=True,
                        content=f"[Content processed by PerceptionTool]\n{content_preview}",
                        size_bytes=path_obj.stat().st_size
                    )
                else:
                    # PerceptionTool ran but failed
                    return LoadedFile(
                        path=file_path,
                        exists=True,
                        content="",
                        size_bytes=path_obj.stat().st_size,
                        error=f"Perception failed: {result.error}"
                    )
            except ImportError as e:
                return LoadedFile(
                    path=file_path,
                    exists=True,
                    content="",
                    size_bytes=path_obj.stat().st_size,
                    error=f"Preview unavailable: PerceptionTool import failed ({e})"
                )
            except Exception as e:
                return LoadedFile(
                    path=file_path,
                    exists=True,
                    content="",
                    size_bytes=path_obj.stat().st_size,
                    error=f"Preview unavailable: {str(e)}"
                )

            # Classify for better user message
            if suffix in {".png", ".jpg", ".jpeg", ".gif", ".webp", ".bmp", ".tiff", ".ico"}:
                file_type = "Image"
            elif suffix == ".pdf":
                file_type = "PDF"
            elif suffix in {".zip", ".tar", ".gz", ".7z", ".rar"}:
                file_type = "Archive"
            else:
                file_type = "Binary"

            return LoadedFile(
                path=file_path,
                exists=True,
                content=f"[{file_type} file detected: {file_path}. Content not viewable in TUI.]",
                size_bytes=path_obj.stat().st_size,
                error=f"{file_type} file skipped (content not extractable)"
            )

        # Check size
        size_bytes = path_obj.stat().st_size

        if size_bytes > max_bytes:
            # Read truncated
            with open(path_obj, 'r', encoding='utf-8', errors='replace') as f:
                content = f.read(max_bytes)

            return LoadedFile(
                path=file_path,
                exists=True,
                content=content,
                size_bytes=size_bytes,
                error=f"File truncated (size: {size_bytes // 1024}KB, limit: {max_size_kb}KB)"
            )

        # Read full file
        with open(path_obj, 'r', encoding='utf-8', errors='replace') as f:
            content = f.read()

        return LoadedFile(
            path=file_path,
            exists=True,
            content=content,
            size_bytes=size_bytes
        )

    except PermissionError:
        return LoadedFile(
            path=file_path,
            exists=True,
            error=f"Permission denied: {file_path}"
        )
    except Exception as e:
        return LoadedFile(
            path=file_path,
            exists=False,
            error=f"Error reading {file_path}: {type(e).__name__}: {e}"
        )

def build_augmented_prompt(user_query: str, loaded_files: List[LoadedFile], flags: Dict[str, bool]) -> str:
    """
    Build the final prompt with file contents and flag guidance.

    Args:
        user_query: User's natural language query
        loaded_files: List of loaded files with their contents
        flags: Dict of flags like {"idk": True}

    Returns:
        Augmented prompt string
    """
    parts = []

    # Add flag guidance at the beginning if present
    if flags.get("idk") or flags.get("careful"):
        parts.append(
            "Important: The user prefers you to admit when you don't know rather than guess. "
            "If you are uncertain or lack sufficient information, explicitly say 'I don't know' "
            "or 'I'm not sure' instead of making assumptions.\n"
        )

    if flags.get("safe") or flags.get("noguess"):
        parts.append(
            "Important: The user wants safe, conservative responses. "
            "Avoid speculation and only state what you're confident about.\n"
        )

    # Add file contents
    valid_files = [f for f in loaded_files if f.exists and f.content]
    if valid_files:
        parts.append("\nYou are being provided with file contents from the current project:\n")

        for file in valid_files:
            parts.append(f"\n<<FILE: {file.path}>>")
            parts.append(file.content)
            parts.append("</FILE>\n")

            if file.error:  # Truncation warning
                parts.append(f"[Note: {file.error}]\n")

    # Add user query
    if user_query:
        if valid_files or flags:
            parts.append(f"\nUser request:\n{user_query}")
        else:
            parts.append(user_query)

    return "".join(parts)

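# Illustrative output shape (hypothetical file name and query): for
# user_query="fix the bug", one loaded file "app.py", and flags={"idk": True},
# build_augmented_prompt() produces roughly:
#
#   Important: The user prefers you to admit when you don't know rather than
#   guess. [...]
#
#   You are being provided with file contents from the current project:
#
#   <<FILE: app.py>>
#   ...file content...
#   </FILE>
#
#   User request:
#   fix the bug
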
# ASCII logo - Devanagari परिषद् with left-to-right saffron gradient (vibrant)
LOGO = """[#e65e1c] █████[/][#ff671f] [/][#ff7a3d] [/]
[#e65e1c] ██ ╔═[/][#ff671f]═██ [/][#ff7a3d] [/]
[#e65e1c]████████[/][#ff671f]██████████[/][#ff7a3d]███████[/][#ff8c5a]████████[/][#ff9e78]████████[/][#ffb095]██████████═╗[/]
[#e65e1c] ╚═██ ╔═[/][#ff671f]═██ ╔═██[/][#ff7a3d] ╔═▀▀▀▀█[/][#ff8c5a]█ ╔═███╔[/][#ff9e78]══██ ╔═[/][#ffb095]══════██ ╔═╝[/]
[#e65e1c] ██ ║ [/][#ff671f] ██ ║ ██[/][#ff7a3d] ║ █[/][#ff8c5a]█ ║ ██ █[/][#ff9e78]█ ██ ║ [/][#ffb095] ██████ ║[/]
[#e65e1c] ██ ║ [/][#ff671f] ██ ║ ██[/][#ff7a3d] ║ ██▀[/][#ff8c5a]▀╔╝ ██ ║[/][#ff9e78] ███ ║ █[/][#ffb095]█ ╔═════╝[/]
[#e65e1c] ███[/][#ff671f]███ ║ ██[/][#ff7a3d] ║ ██ [/][#ff8c5a]╔╝ ██[/][#ff9e78]████ ║ █[/][#ffb095]█ ║ ██═╗[/]
[#e65e1c] [/][#ff671f] ██ ║ ██[/][#ff7a3d] ║ ██ [/][#ff8c5a]╚═╗ [/][#ff9e78] ██ ║ [/][#ffb095] ██████ ║[/]
[#e65e1c] [/][#ff671f] ██ ║ ██[/][#ff7a3d] ║ █[/][#ff8c5a]█ ║ [/][#ff9e78] ██ ║ [/][#ffb095] ██ ║[/]
[#e65e1c] [/][#ff671f] ╚══╝ ╚═[/][#ff7a3d]═╝ ╚[/][#ff8c5a]══╝ [/][#ff9e78] ╚══╝ [/][#ffb095] ╚══╝[/]"""

# =============================================================================
# Sabha (Council) Configurations
# =============================================================================

@dataclass
class SabhaConfig:
    """Sabha configuration."""
    id: str
    name: str
    hindi_name: str
    description: str
    roles: int
    ram_gb: int
    speed: str
    emoji: str
    model_slots: list  # ["heavy", "mid", "light"] etc.


SABHAS = [
    SabhaConfig(
        id="maha",
        name="Maha Sabha",
        hindi_name="महा सभा",
        description="Thorough: 3 roles (Analysis → Planning → Execution)",
        roles=3,
        ram_gb=32,
        speed="Slow",
        emoji="👑",
        model_slots=["heavy", "mid", "light"]
    ),
    SabhaConfig(
        id="madhyam",
        name="Madhyam Sabha",
        hindi_name="मध्यम सभा",
        description="Balanced: 2 roles (Planning → Execution)",
        roles=2,
        ram_gb=16,
        speed="Medium",
        emoji="⚡",
        model_slots=["heavy", "light"]
    ),
    SabhaConfig(
        id="laghu",
        name="Laghu Sabha",
        hindi_name="लघु सभा",
        description="Fast: 1 role (Direct Execution)",
        roles=1,
        ram_gb=8,
        speed="Fast",
        emoji="🚀",
        model_slots=["single"]
    ),
]

# =============================================================================
# Model Catalog
# =============================================================================

MODELS_JSON_PATH = Path(__file__).parent.parent / "data" / "models.json"

@dataclass
class ModelInfo:
    """Model information."""
    name: str
    shortcut: str
    size_gb: float
    description: str
    source: str = ""  # huggingface, ollama, lmstudio
    quantization: str = "Q4_K_M"
    distributor: str = ""
    params: str = ""
    tags: Optional[list] = None
    # Set for models detected locally; required by the keyword calls in
    # get_available_models_for_backend() below.
    available: bool = False


def load_model_catalog() -> dict:
    """Load model catalog from JSON file."""
    if MODELS_JSON_PATH.exists():
        try:
            with open(MODELS_JSON_PATH) as f:
                data = json.load(f)

            catalog = {}
            for source_key, source_data in data.get("sources", {}).items():
                models = []
                for m in source_data.get("models", []):
                    models.append(ModelInfo(
                        name=m.get("name", ""),
                        shortcut=m.get("shortcut", ""),
                        size_gb=m.get("size_gb", 0),
                        description=m.get("description", ""),
                        source=source_key,
                        quantization=m.get("quantization", "Q4_K_M"),
                        distributor=m.get("distributor", ""),
                        params=m.get("params", ""),
                        tags=m.get("tags", []),
                    ))
                catalog[source_key] = models
            return catalog
        except Exception as e:
            print(f"Error loading models.json: {e}")

    # Fallback to minimal catalog
    return {
        "ollama": [
            ModelInfo("Llama 3.2 3B", "llama3.2:3b", 2.0, "Efficient and fast", "ollama", "Q4_K_M", "Meta", "3B"),
            ModelInfo("Qwen 2.5 7B", "qwen2.5:7b", 4.5, "Excellent reasoning", "ollama", "Q4_K_M", "Alibaba", "7B"),
        ],
        "huggingface": [
            ModelInfo("Llama 3.2 3B", "meta-llama/Llama-3.2-3B-Instruct", 2.0, "Efficient model", "huggingface", "BF16", "Meta", "3B"),
        ],
        "lmstudio": [
            ModelInfo("Llama 3.2 3B", "Llama-3.2-3B-Instruct-GGUF", 2.0, "GGUF format", "lmstudio", "Q4_K_M", "Meta", "3B"),
        ],
    }


# Load catalog on import
MODEL_CATALOG = load_model_catalog()

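# For reference, load_model_catalog() expects models.json to look roughly like
# the sketch below (field names taken from the reader above; values are
# illustrative, not from the shipped data file):
#
#   {
#     "sources": {
#       "ollama": {
#         "models": [
#           {"name": "Llama 3.2 3B", "shortcut": "llama3.2:3b",
#            "size_gb": 2.0, "description": "Efficient and fast",
#            "quantization": "Q4_K_M", "distributor": "Meta",
#            "params": "3B", "tags": ["coding"]}
#         ]
#       },
#       "huggingface": {"models": []},
#       "lmstudio": {"models": []}
#     }
#   }
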
# =============================================================================
# Model Manager Integration (matches CLI system)
# =============================================================================

def map_source_to_backend(source: str) -> str:
    """
    Map model source to runtime backend (matches CLI behavior).

    CRITICAL: HuggingFace GGUF models use llama_cpp backend, NOT transformers!

    Args:
        source: Model source ("huggingface" / "ollama" / "lmstudio" / "native")

    Returns:
        Backend name for ModelConfig
    """
    mapping = {
        "huggingface": "llama_cpp",  # HF GGUF → llama.cpp (not transformers!)
        "ollama": "ollama",          # Ollama → ollama API
        "lmstudio": "openai",        # LM Studio → OpenAI-compatible API
        "native": "native",          # Native → MLX distributed
    }
    return mapping.get(source.lower(), "llama_cpp")


def get_available_models_with_status() -> Dict[str, List[Dict]]:
    """
    Get models grouped by source, with download status.
    Uses ModelManager to check what's actually downloaded.

    Returns:
        {
            "huggingface": [{"id": "qwen2.5:1.5b", "name": "...", "downloaded": True, ...}, ...],
            "ollama": [...],
            "lmstudio": [...]
        }
    """
    from parishad.models.downloader import ModelManager

    try:
        manager = ModelManager()
        downloaded_models = {m.name: m for m in manager.list_models()}
    except Exception as e:
        print(f"Warning: Could not access ModelManager: {e}")
        downloaded_models = {}

    # Combine downloaded models + popular models from catalog
    result = {}

    for source, models in MODEL_CATALOG.items():
        result[source] = []
        for model in models:
            model_id = model.shortcut
            is_downloaded = model_id in downloaded_models

            model_dict = {
                "id": model_id,
                "name": model.name,
                "downloaded": is_downloaded,
                "size": f"{model.size_gb:.1f} GB" if model.size_gb > 0 else "Unknown",
                "quantization": model.quantization,
                "distributor": model.distributor,
                "params": model.params,
                "tags": model.tags,
                "description": model.description,
            }

            if is_downloaded:
                dl_model = downloaded_models[model_id]
                model_dict["path"] = str(dl_model.path)
                model_dict["size"] = dl_model.size_human

            result[source].append(model_dict)

    return result

def ensure_model_available(
    model_id: str,
    source: str,
    progress_callback=None,
    cancel_event=None
) -> Optional[Path]:
    """
    Ensure model is downloaded and return path with progress tracking.

    Args:
        model_id: Model identifier (e.g., "qwen2.5:1.5b")
        source: Source to download from ("huggingface" / "ollama" / "lmstudio")
        progress_callback: Optional callback for progress updates
        cancel_event: Optional threading.Event to signal cancellation

    Returns:
        Path to model file, or None if download fails/cancelled
    """
    from parishad.models.downloader import ModelManager

    try:
        manager = ModelManager()

        # Check if already present
        path = manager.get_model_path(model_id)
        if path and path.exists():
            return path

        # Need to download - wrap progress callback to check for cancellation
        if progress_callback:
            def wrapped_callback(progress):
                # Check if cancelled
                if cancel_event and cancel_event.is_set():
                    raise KeyboardInterrupt("Download cancelled by user")
                progress_callback(progress)

            model_info = manager.download(
                model_spec=model_id,
                source=source,
                progress_callback=wrapped_callback
            )
        else:
            model_info = manager.download(
                model_spec=model_id,
                source=source
            )

        return model_info.path

    except KeyboardInterrupt:
        print("\nDownload cancelled by user")
        return None
    except Exception as e:
        print(f"Error ensuring model availability: {e}")
        return None

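# Usage sketch (illustrative; assumes the caller runs this off the UI thread,
# and the model id is hypothetical):
#
#   import threading
#   cancel = threading.Event()
#   path = ensure_model_available(
#       "qwen2.5:1.5b", "huggingface",
#       progress_callback=lambda p: print(f"progress: {p}"),
#       cancel_event=cancel,
#   )
#   # From the UI thread, cancel.set() aborts the download at the next
#   # progress tick via the KeyboardInterrupt raised in wrapped_callback.
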
# =============================================================================
# Backend and Model Availability Detection
# =============================================================================

def detect_available_backends() -> Dict[str, Tuple[bool, str]]:
    """
    Detect which backends are available on this system.

    Returns:
        Dict mapping backend_id -> (available: bool, status_message: str)
    """
    results = {}

    # Ollama
    try:
        if shutil.which("ollama"):
            result = subprocess.run(
                ["ollama", "list"],
                capture_output=True,
                text=True,
                timeout=5
            )
            if result.returncode == 0:
                results["ollama"] = (True, "Ollama running")
            else:
                results["ollama"] = (False, "Ollama installed but not running")
        else:
            results["ollama"] = (False, "Ollama not installed")
    except Exception as e:
        results["ollama"] = (False, f"Ollama check failed: {e}")

    # HuggingFace/Transformers
    try:
        import transformers
        import torch
        results["huggingface"] = (True, "Transformers installed")
    except ImportError:
        results["huggingface"] = (False, "transformers/torch not installed")

    # Native MLX backend
    try:
        # Check if native server is reachable
        host = os.environ.get("NATIVE_MLX_HOST", "10.0.0.2")
        port = int(os.environ.get("NATIVE_MLX_PORT", "29500"))

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(2)
        result = sock.connect_ex((host, port))
        sock.close()

        if result == 0:
            results["native"] = (True, f"MLX server at {host}:{port}")
        else:
            results["native"] = (False, f"MLX server unreachable at {host}:{port}")
    except Exception as e:
        results["native"] = (False, f"Native check failed: {e}")

    # LM Studio
    try:
        # Check if LM Studio API is accessible (usually localhost:1234)
        import requests
        response = requests.get("http://localhost:1234/v1/models", timeout=2)
        if response.status_code == 200:
            results["lmstudio"] = (True, "LM Studio API available")
        else:
            results["lmstudio"] = (False, "LM Studio API not responding")
    except Exception:
        results["lmstudio"] = (False, "LM Studio not detected")

    return results

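# Consumption sketch (illustrative): each entry is an (available, message)
# tuple, so a caller can render one status line per backend:
#
#   for backend_id, (ok, message) in detect_available_backends().items():
#       icon = "✓" if ok else "✗"
#       print(f"{icon} {backend_id}: {message}")
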
def is_model_available(model_id: str, backend: str) -> bool:
    """
    Check if a specific model is available locally for the given backend.

    Args:
        model_id: Model identifier (e.g., "llama3.2:3b", "meta-llama/Llama-3.2-3B")
        backend: Backend name ("ollama", "huggingface", "native", etc.)

    Returns:
        True if model is available, False otherwise
    """
    if backend == "ollama":
        try:
            result = subprocess.run(
                ["ollama", "list"],
                capture_output=True,
                text=True,
                timeout=5
            )
            if result.returncode == 0:
                # Parse output and check if model_id exists
                # Ollama list format: NAME ID SIZE MODIFIED
                for line in result.stdout.splitlines()[1:]:  # Skip header
                    if line.strip():
                        model_name = line.split()[0]
                        if model_name == model_id or model_name.startswith(model_id):
                            return True
            return False
        except Exception:
            return False

    elif backend in ("huggingface", "transformers"):
        try:
            # Check HF cache for model
            hf_home = os.environ.get("HF_HOME", str(Path.home() / ".cache" / "huggingface"))
            cache_dir = Path(hf_home) / "hub"

            if not cache_dir.exists():
                return False

            # Convert model_id to cache directory format
            # e.g., "meta-llama/Llama-3.2-3B" -> "models--meta-llama--Llama-3.2-3B"
            cache_model_dir = "models--" + model_id.replace("/", "--")
            model_path = cache_dir / cache_model_dir

            return model_path.exists() and model_path.is_dir()
        except Exception:
            return False

    elif backend == "native":
        # For native backend, check if server is reachable
        try:
            host = os.environ.get("NATIVE_MLX_HOST", "10.0.0.2")
            port = int(os.environ.get("NATIVE_MLX_PORT", "29500"))

            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(2)
            result = sock.connect_ex((host, port))
            sock.close()

            return result == 0
        except Exception:
            return False

    # Unknown backend or not implemented
    return False

def get_available_models_for_backend(backend: str) -> List[ModelInfo]:
    """
    Get list of actually available models for a backend.

    Args:
        backend: Backend name

    Returns:
        List of ModelInfo with available models
    """
    models = []

    if backend == "ollama":
        try:
            result = subprocess.run(
                ["ollama", "list"],
                capture_output=True,
                text=True,
                timeout=5
            )
            if result.returncode == 0:
                for line in result.stdout.splitlines()[1:]:  # Skip header
                    if line.strip():
                        parts = line.split()
                        if len(parts) >= 2:
                            model_name = parts[0]
                            size = parts[2] if len(parts) > 2 else "?"

                            # Try to parse size
                            size_gb = 0.0
                            if "GB" in size:
                                try:
                                    size_gb = float(size.replace("GB", ""))
                                except ValueError:
                                    pass

                            models.append(ModelInfo(
                                name=model_name.split(":")[0].title(),
                                shortcut=model_name,
                                size_gb=size_gb,
                                description=f"Local Ollama model ({size})",
                                source="ollama",
                                tags=["ollama", "local"],
                                available=True
                            ))
        except Exception:
            pass

    elif backend in ("huggingface", "transformers"):
        try:
            hf_home = os.environ.get("HF_HOME", str(Path.home() / ".cache" / "huggingface"))
            cache_dir = Path(hf_home) / "hub"

            if cache_dir.exists():
                for model_dir in cache_dir.iterdir():
                    if model_dir.is_dir() and model_dir.name.startswith("models--"):
                        # Extract model ID from directory name
                        model_id = model_dir.name.replace("models--", "").replace("--", "/")

                        # Estimate size
                        size_gb = 0.0
                        try:
                            total_size = sum(f.stat().st_size for f in model_dir.rglob("*") if f.is_file())
                            size_gb = total_size / (1024 ** 3)
                        except Exception:
                            pass

                        models.append(ModelInfo(
                            name=model_id.split("/")[-1],
                            shortcut=model_id,
                            size_gb=size_gb,
                            description="Downloaded from HuggingFace",
                            source="huggingface",
                            tags=["huggingface", "local"],
                            available=True
                        ))
        except Exception:
            pass

    elif backend == "native":
        # Native backend - show the configured remote model
        host = os.environ.get("NATIVE_MLX_HOST", "10.0.0.2")
        port = os.environ.get("NATIVE_MLX_PORT", "29500")

        models.append(ModelInfo(
            name="Llama 3.2 1B (Remote)",
            shortcut="mlx-community/Llama-3.2-1B-Instruct-4bit",
            size_gb=0.0,  # Remote model
            description=f"MLX cluster at {host}:{port}",
            source="native",
            tags=["native", "remote", "mlx"],
            available=is_model_available("mlx-community/Llama-3.2-1B-Instruct-4bit", "native")
        ))

    return models

CSS = """
|
|
1068
|
+
Screen {
|
|
1069
|
+
background: $surface;
|
|
1070
|
+
}
|
|
1071
|
+
|
|
1072
|
+
/* Setup Screen Styles */
|
|
1073
|
+
#setup-container {
|
|
1074
|
+
width: 100%;
|
|
1075
|
+
height: 1fr;
|
|
1076
|
+
overflow-y: auto;
|
|
1077
|
+
}
|
|
1078
|
+
|
|
1079
|
+
#setup-content {
|
|
1080
|
+
width: 100%;
|
|
1081
|
+
height: auto;
|
|
1082
|
+
padding: 1 2;
|
|
1083
|
+
}
|
|
1084
|
+
|
|
1085
|
+
#setup-header {
|
|
1086
|
+
height: auto;
|
|
1087
|
+
content-align: center middle;
|
|
1088
|
+
padding-bottom: 1;
|
|
1089
|
+
}
|
|
1090
|
+
|
|
1091
|
+
#setup-logo {
|
|
1092
|
+
width: 100%;
|
|
1093
|
+
height: auto;
|
|
1094
|
+
content-align: center middle;
|
|
1095
|
+
text-align: center;
|
|
1096
|
+
}
|
|
1097
|
+
|
|
1098
|
+
|
|
1099
|
+
#step-title {
|
|
1100
|
+
text-align: center;
|
|
1101
|
+
width: 100%;
|
|
1102
|
+
padding: 1;
|
|
1103
|
+
}
|
|
1104
|
+
|
|
1105
|
+
#sabha-step-title {
|
|
1106
|
+
text-align: center;
|
|
1107
|
+
padding: 1;
|
|
1108
|
+
}
|
|
1109
|
+
|
|
1110
|
+
#model-step-title {
|
|
1111
|
+
text-align: center;
|
|
1112
|
+
padding: 1;
|
|
1113
|
+
}
|
|
1114
|
+
|
|
1115
|
+
.sabha-grid {
|
|
1116
|
+
width: 100%;
|
|
1117
|
+
height: auto;
|
|
1118
|
+
layout: horizontal;
|
|
1119
|
+
padding: 1;
|
|
1120
|
+
}
|
|
1121
|
+
|
|
1122
|
+
/* Maha Sabha - Gold */
|
|
1123
|
+
.sabha-card-maha {
|
|
1124
|
+
width: 1fr;
|
|
1125
|
+
height: auto;
|
|
1126
|
+
min-height: 10;
|
|
1127
|
+
padding: 1 2;
|
|
1128
|
+
margin: 0 1;
|
|
1129
|
+
background: #1a1500 50%;
|
|
1130
|
+
}
|
|
1131
|
+
.sabha-card-maha.selected {
|
|
1132
|
+
background: #3a2e00;
|
|
1133
|
+
text-style: bold;
|
|
1134
|
+
}
|
|
1135
|
+
|
|
1136
|
+
/* Madhyam Sabha - Blue */
|
|
1137
|
+
.sabha-card-madhyam {
|
|
1138
|
+
width: 1fr;
|
|
1139
|
+
height: auto;
|
|
1140
|
+
min-height: 10;
|
|
1141
|
+
padding: 1 2;
|
|
1142
|
+
margin: 0 1;
|
|
1143
|
+
background: #0a1520 50%;
|
|
1144
|
+
}
|
|
1145
|
+
.sabha-card-madhyam.selected {
|
|
1146
|
+
background: #103050;
|
|
1147
|
+
text-style: bold;
|
|
1148
|
+
}
|
|
1149
|
+
|
|
1150
|
+
/* Laghu Sabha - Green */
|
|
1151
|
+
.sabha-card-laghu {
|
|
1152
|
+
width: 1fr;
|
|
1153
|
+
height: auto;
|
|
1154
|
+
min-height: 10;
|
|
1155
|
+
padding: 1 2;
|
|
1156
|
+
margin: 0 1;
|
|
1157
|
+
background: #0a200a 50%;
|
|
1158
|
+
}
|
|
1159
|
+
.sabha-card-laghu.selected {
|
|
1160
|
+
background: #104010;
|
|
1161
|
+
text-style: bold;
|
|
1162
|
+
}
|
|
1163
|
+
|
|
1164
|
+
.sabha-card-title-english {
|
|
1165
|
+
text-align: center;
|
|
1166
|
+
padding-bottom: 0;
|
|
1167
|
+
}
|
|
1168
|
+
|
|
1169
|
+
.sabha-card-title-hindi {
|
|
1170
|
+
text-align: center;
|
|
1171
|
+
padding-top: 0;
|
|
1172
|
+
}
|
|
1173
|
+
|
|
1174
|
+
.sabha-card-desc {
|
|
1175
|
+
color: $text-muted;
|
|
1176
|
+
}
|
|
1177
|
+
|
|
1178
|
+
.sabha-card-stats {
|
|
1179
|
+
color: $text-muted;
|
|
1180
|
+
}
|
|
1181
|
+
|
|
1182
|
+
.model-summary {
|
|
1183
|
+
text-align: center;
|
|
1184
|
+
padding: 0 1 1 1;
|
|
1185
|
+
height: auto;
|
|
1186
|
+
width: 1fr;
|
|
1187
|
+
}
|
|
1188
|
+
|
|
1189
|
+
.model-summary-bar {
|
|
1190
|
+
height: auto;
|
|
1191
|
+
layout: horizontal;
|
|
1192
|
+
align: center middle;
|
|
1193
|
+
padding: 0 1;
|
|
1194
|
+
}
|
|
1195
|
+
|
|
1196
|
+
.btn-reset {
|
|
1197
|
+
width: auto;
|
|
1198
|
+
min-width: 10;
|
|
1199
|
+
height: 3;
|
|
1200
|
+
}
|
|
1201
|
+
|
|
1202
|
+
|
|
1203
|
+
|
|
1204
|
+
/* Model Browser */
|
|
1205
|
+
#model-browser {
|
|
1206
|
+
width: 100%;
|
|
1207
|
+
height: 1fr;
|
|
1208
|
+
border: round $primary;
|
|
1209
|
+
margin: 1 0;
|
|
1210
|
+
}
|
|
1211
|
+
|
|
1212
|
+
.model-tabs {
|
|
1213
|
+
height: 3;
|
|
1214
|
+
layout: horizontal;
|
|
1215
|
+
background: $panel;
|
|
1216
|
+
margin-bottom: 1;
|
|
1217
|
+
}
|
|
1218
|
+
|
|
1219
|
+
/* Ollama Tab - Blue */
|
|
1220
|
+
#tab-ollama {
|
|
1221
|
+
width: 1fr;
|
|
1222
|
+
border: none;
|
|
1223
|
+
background: #1a1a2e;
|
|
1224
|
+
color: #4a9eff;
|
|
1225
|
+
}
|
|
1226
|
+
|
|
1227
|
+
#tab-ollama:hover {
|
|
1228
|
+
background: #252545;
|
|
1229
|
+
}
|
|
1230
|
+
|
|
1231
|
+
#tab-ollama.active {
|
|
1232
|
+
background: #4a9eff;
|
|
1233
|
+
color: #ffffff;
|
|
1234
|
+
}
|
|
1235
|
+
|
|
1236
|
+
/* HuggingFace Tab - Yellow */
|
|
1237
|
+
#tab-huggingface {
|
|
1238
|
+
width: 1fr;
|
|
1239
|
+
border: none;
|
|
1240
|
+
background: #2a2a1a;
|
|
1241
|
+
color: #ffcc00;
|
|
1242
|
+
}
|
|
1243
|
+
|
|
1244
|
+
#tab-huggingface:hover {
|
|
1245
|
+
background: #3a3a25;
|
|
1246
|
+
}
|
|
1247
|
+
|
|
1248
|
+
#tab-huggingface.active {
|
|
1249
|
+
background: #ffcc00;
|
|
1250
|
+
color: #000000;
|
|
1251
|
+
}
|
|
1252
|
+
|
|
1253
|
+
/* LM Studio Tab - Purple */
|
|
1254
|
+
#tab-lmstudio {
|
|
1255
|
+
width: 1fr;
|
|
1256
|
+
border: none;
|
|
1257
|
+
background: #2a1a2e;
|
|
1258
|
+
color: #9966ff;
|
|
1259
|
+
}
|
|
1260
|
+
|
|
1261
|
+
#tab-lmstudio:hover {
|
|
1262
|
+
background: #3a2545;
|
|
1263
|
+
}
|
|
1264
|
+
|
|
1265
|
+
#tab-lmstudio.active {
|
|
1266
|
+
background: #9966ff;
|
|
1267
|
+
color: #ffffff;
|
|
1268
|
+
}
|
|
1269
|
+
|
|
1270
|
+
|
|
1271
|
+
#model-search {
|
|
1272
|
+
margin: 1 0;
|
|
1273
|
+
border: round $primary;
|
|
1274
|
+
}
|
|
1275
|
+
|
|
1276
|
+
#model-search:focus {
|
|
1277
|
+
border: round $accent;
|
|
1278
|
+
}
|
|
1279
|
+
|
|
1280
|
+
#model-browser-container {
|
|
1281
|
+
width: 100%;
|
|
1282
|
+
height: 25;
|
|
1283
|
+
min-height: 15;
|
|
1284
|
+
border: round $primary;
|
|
1285
|
+
margin: 1 0;
|
|
1286
|
+
}
|
|
1287
|
+
|
|
1288
|
+
.model-list {
|
|
1289
|
+
height: 100%;
|
|
1290
|
+
width: 100%;
|
|
1291
|
+
}
|
|
1292
|
+
|
|
1293
|
+
.model-item {
|
|
1294
|
+
height: 3;
|
|
1295
|
+
padding: 0 1;
|
|
1296
|
+
border: round $panel;
|
|
1297
|
+
margin: 0;
|
|
1298
|
+
}
|
|
1299
|
+
|
|
1300
|
+
.model-item:hover {
|
|
1301
|
+
border: round $accent;
|
|
1302
|
+
background: $boost;
|
|
1303
|
+
}
|
|
1304
|
+
|
|
1305
|
+
.model-item.selected {
|
|
1306
|
+
border: double $success;
|
|
1307
|
+
background: $boost;
|
|
1308
|
+
}
|
|
1309
|
+
|
|
1310
|
+
|
|
1311
|
+
/* Action Buttons */
|
|
1312
|
+
#action-bar {
|
|
1313
|
+
height: 5;
|
|
1314
|
+
layout: horizontal;
|
|
1315
|
+
padding: 1 2;
|
|
1316
|
+
margin-top: 1;
|
|
1317
|
+
background: $panel;
|
|
1318
|
+
border-top: solid $primary;
|
|
1319
|
+
}
|
|
1320
|
+
|
|
1321
|
+
#btn-skip {
|
|
1322
|
+
width: 1fr;
|
|
1323
|
+
margin: 0 1;
|
|
1324
|
+
height: 3;
|
|
1325
|
+
}
|
|
1326
|
+
|
|
1327
|
+
#btn-continue {
|
|
1328
|
+
width: 1fr;
|
|
1329
|
+
margin: 0 1;
|
|
1330
|
+
height: 3;
|
|
1331
|
+
}
|
|
1332
|
+
|
|
1333
|
+
/* Chat Screen Styles */
|
|
1334
|
+
#welcome {
|
|
1335
|
+
height: auto;
|
|
1336
|
+
padding: 1;
|
|
1337
|
+
}
|
|
1338
|
+
|
|
1339
|
+
#logo {
|
|
1340
|
+
height: auto;
|
|
1341
|
+
max-height: 12;
|
|
1342
|
+
content-align: center middle;
|
|
1343
|
+
text-align: center;
|
|
1344
|
+
padding-top: 1;
|
|
1345
|
+
}
|
|
1346
|
+
|
|
1347
|
+
#tips {
|
|
1348
|
+
height: auto;
|
|
1349
|
+
max-height: 4;
|
|
1350
|
+
color: $text-muted;
|
|
1351
|
+
padding: 0 1;
|
|
1352
|
+
content-align: center middle;
|
|
1353
|
+
}
|
|
1354
|
+
|
|
1355
|
+
#role-progress {
|
|
1356
|
+
height: 1;
|
|
1357
|
+
margin: 0 1;
|
|
1358
|
+
}
|
|
1359
|
+
|
|
1360
|
+
#chat-area {
|
|
1361
|
+
height: 1fr;
|
|
1362
|
+
min-height: 10;
|
|
1363
|
+
padding: 0 1;
|
|
1364
|
+
overflow-y: auto;
|
|
1365
|
+
scrollbar-size-vertical: 2;
|
|
1366
|
+
}
|
|
1367
|
+
|
|
1368
|
+
#input-box {
|
|
1369
|
+
height: 3;
|
|
1370
|
+
border: round $primary;
|
|
1371
|
+
padding: 0 1;
|
|
1372
|
+
margin: 0 1 1 1;
|
|
1373
|
+
layout: horizontal;
|
|
1374
|
+
}
|
|
1375
|
+
|
|
1376
|
+
#input-box:focus-within {
|
|
1377
|
+
border: round $accent;
|
|
1378
|
+
}
|
|
1379
|
+
|
|
1380
|
+
#prompt-prefix {
|
|
1381
|
+
width: auto;
|
|
1382
|
+
color: $accent;
|
|
1383
|
+
}
|
|
1384
|
+
|
|
1385
|
+
#prompt-input {
|
|
1386
|
+
width: 1fr;
|
|
1387
|
+
border: none;
|
|
1388
|
+
background: transparent;
|
|
1389
|
+
}
|
|
1390
|
+
|
|
1391
|
+
#status {
|
|
1392
|
+
height: 1;
|
|
1393
|
+
padding: 0 1;
|
|
1394
|
+
}
|
|
1395
|
+
|
|
1396
|
+
#prompt-input:focus {
|
|
1397
|
+
border: none;
|
|
1398
|
+
}
|
|
1399
|
+
|
|
1400
|
+
#status {
|
|
1401
|
+
height: 1;
|
|
1402
|
+
padding: 0 1;
|
|
1403
|
+
color: $text-muted;
|
|
1404
|
+
}
|
|
1405
|
+
|
|
1406
|
+
#role-progress {
|
|
1407
|
+
height: 1;
|
|
1408
|
+
text-align: center;
|
|
1409
|
+
padding: 0 1;
|
|
1410
|
+
}
|
|
1411
|
+
"""
|
|
1412
|
+
|
|
1413
|
+
|
|
1414
|
+
# =============================================================================
# Setup Screen
# =============================================================================

class SabhaCard(Static):
    """A clickable Sabha selection card."""

    class Selected(Message):
        """Message when Sabha is selected."""
        def __init__(self, sabha: SabhaConfig) -> None:
            self.sabha = sabha
            super().__init__()

    def __init__(self, sabha: SabhaConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.sabha = sabha
        self.is_selected = False

    def compose(self) -> ComposeResult:
        yield Static(
            f"{self.sabha.emoji} [bold]{self.sabha.name}[/bold]",
            classes="sabha-card-title-english"
        )

        yield Static(self.sabha.description, classes="sabha-card-desc")
        yield Static(
            f"[dim]Roles:[/dim] {self.sabha.roles} "
            f"[dim]RAM:[/dim] {self.sabha.ram_gb}GB "
            f"[dim]Speed:[/dim] {self.sabha.speed}",
            classes="sabha-card-stats"
        )

    def on_click(self) -> None:
        self.post_message(self.Selected(self.sabha))

    def select(self) -> None:
        self.is_selected = True
        self.add_class("selected")

    def deselect(self) -> None:
        self.is_selected = False
        self.remove_class("selected")

class ModelCard(Static):
    """A clickable model selection card."""

    class Selected(Message):
        """Message when model is selected."""
        def __init__(self, model: ModelInfo) -> None:
            self.model = model
            super().__init__()

    def __init__(self, model: ModelInfo, **kwargs) -> None:
        super().__init__(**kwargs)
        self.model = model
        self.is_selected = False
        self.is_installed = self._check_installed()

    def _check_installed(self) -> bool:
        """Check if this model is already downloaded."""
        try:
            from parishad.models.downloader import ModelManager
            manager = ModelManager()
            path = manager.get_model_path(self.model.shortcut)
            return path is not None and path.exists()
        except Exception:
            return False

    def compose(self) -> ComposeResult:
        m = self.model
        # Show green tick if installed
        installed_icon = "[green]✓[/green] " if self.is_installed else ""
        yield Static(
            f"{installed_icon}[bold]{m.name}[/bold] [cyan]{m.params}[/cyan] "
            f"[dim]({m.size_gb:.1f}GB)[/dim] [yellow]{m.distributor}[/yellow]"
        )

    def on_click(self) -> None:
        self.post_message(self.Selected(self.model))

    def select(self) -> None:
        self.is_selected = True
        self.add_class("selected")

    def deselect(self) -> None:
        self.is_selected = False
        self.remove_class("selected")

+class SetupScreen(Screen):
+    """Setup wizard screen for first-time configuration."""
+
+    BINDINGS = [
+        Binding("escape", "skip", "Skip Setup"),
+        Binding("enter", "confirm", "Confirm"),
+    ]
+
+    def __init__(self, initial_config: Optional[ParishadConfig] = None) -> None:
+        super().__init__()
+        self.initial_config = initial_config  # For re-setup scenarios
+        self.selected_sabha: Optional[SabhaConfig] = None
+        self.selected_models: Dict[str, ModelInfo] = {}  # Map slot_name -> model
+        self.current_slot_idx: int = 0
+        self.current_source = "ollama"  # Default to Ollama (matches CLI)
+        self.step = 1  # 1 = Sabha, 2 = Model
+        self.is_downloading = False  # Lock to prevent concurrent setup
+
+        # Pre-populate from initial_config if provided
+        if initial_config:
+            # Find sabha config
+            for sabha in SABHAS:
+                if sabha.id == initial_config.sabha:
+                    self.selected_sabha = sabha
+                    break
+            self.current_source = initial_config.backend
+
+    def compose(self) -> ComposeResult:
+        # Everything in one scrollable container
+        with ScrollableContainer(id="setup-container"):
+            with Vertical(id="setup-content"):
+                # Logo and welcome message (scrolls with content)
+                yield Static(LOGO, id="setup-logo", markup=True)
+                yield Static(
+                    "[bold]Welcome to Parishad![/bold]\n"
+                    "[dim]Let's set up your AI council.[/dim]",
+                    id="step-title"
+                )
+
+                # Step 1: Sabha Selection
+                yield Static("[bold]Step 1:[/bold] Choose your Sabha (Council)", id="sabha-step-title")
+                yield Horizontal(
+                    *[SabhaCard(sabha, classes=f"sabha-card-{sabha.id}") for sabha in SABHAS],
+                    classes="sabha-grid"
+                )
+
+
+                # Step 2: Model Selection
+                yield Static("\n[bold]Step 2:[/bold] Select models (Waiting for Sabha...)", id="model-step-title")
+
+                # Selected models summary with reset button
+                yield Horizontal(
+                    Static("", id="model-summary", classes="model-summary"),
+                    Button("🔄 Reset", id="btn-reset-models", variant="default", classes="btn-reset"),
+                    classes="model-summary-bar"
+                )
+
+                # Model browser with backend tabs (matches CLI system)
+                with Container(id="model-browser-container"):
+                    yield Horizontal(
+                        Button("🦙 Ollama", id="tab-ollama", classes="model-tab active"),
+                        Button("🤗 HuggingFace", id="tab-huggingface", classes="model-tab"),
+                        Button("🎨 LM Studio", id="tab-lmstudio", classes="model-tab"),
+                        classes="model-tabs"
+                    )
+
+                    # Search bar
+                    yield Input(placeholder="🔍 Search models...", id="model-search")
+
+                    yield ScrollableContainer(
+                        *[ModelCard(m, classes="model-item") for m in MODEL_CATALOG.get("ollama", [])],
+                        id="model-list",
+                        classes="model-list"
+                    )
+
+                # Progress bar (initially hidden via CSS)
+                yield ProgressBar(total=100, show_eta=True, id="download-progress")
+
+                # Action buttons - inside scrollable area
+                yield Horizontal(
+                    Button("Skip (use defaults)", id="btn-skip", variant="default"),
+                    Button("Continue →", id="btn-continue", variant="primary"),
+                    id="action-bar"
+                )
+
+    def on_mount(self) -> None:
+        # Pre-select recommended Sabha (Laghu)
+        for card in self.query(SabhaCard):
+            if card.sabha.id == "laghu":
+                card.select()
+                self.selected_sabha = card.sabha
+                break
+
+        # Pre-select first model
+        model_cards = list(self.query(ModelCard))
+        if model_cards:
+            model_cards[0].select()
+            # Don't set model yet, wait for user click
+
+    @on(SabhaCard.Selected)
+    def handle_sabha_selected(self, event: SabhaCard.Selected) -> None:
+        for card in self.query(SabhaCard):
+            card.deselect()
+        # Find the card that sent the message and select it
+        for card in self.query(SabhaCard):
+            if card.sabha.id == event.sabha.id:
+                card.select()
+                self.selected_sabha = event.sabha
+                self.current_slot_idx = 0
+                self.selected_models = {}
+                self._update_model_step_title()
+                # Disable continue until all model slots are filled again
+                self.query_one("#btn-continue", Button).disabled = True
+                break
+
+    @on(ModelCard.Selected)
+    def handle_model_selected(self, event: ModelCard.Selected) -> None:
+        if not self.selected_sabha:
+            return
+
+        # Record selection for current slot
+        slots = self.selected_sabha.model_slots
+        if self.current_slot_idx < len(slots):
+            current_slot = slots[self.current_slot_idx]
+            self.selected_models[current_slot] = event.model
+
+            # Move to next slot
+            self.current_slot_idx += 1
+
+        # Update summary display
+        self._update_model_summary()
+
+        # Check if done
+        if self.current_slot_idx >= len(slots):
+            self.query_one("#btn-continue", Button).disabled = False
+            self.query_one("#model-step-title", Static).update(
+                "\n[bold]Step 2:[/bold] All models selected! ([green]✓ Ready[/green])"
+            )
+        else:
+            self._update_model_step_title()
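+
+    # Slot-by-slot flow, assuming a hypothetical Sabha whose model_slots are
+    # ["big", "small"]: the first card click fills "big", the second fills
+    # "small", and only then is the Continue button re-enabled.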
+
+    def _update_model_step_title(self) -> None:
+        if not self.selected_sabha:
+            return
+
+        slots = self.selected_sabha.model_slots
+        if self.current_slot_idx < len(slots):
+            current_slot = slots[self.current_slot_idx]
+            self.query_one("#model-step-title", Static).update(
+                f"\n[bold]Step 2:[/bold] Select [cyan]{current_slot.upper()}[/cyan] model "
+                f"({self.current_slot_idx + 1}/{len(slots)})"
+            )
+
+        # Reset card selection visually for next pick
+        for card in self.query(ModelCard):
+            card.deselect()
+
+    def _update_model_summary(self) -> None:
+        """Update the selected models summary text."""
+        if not self.selected_models:
+            self.query_one("#model-summary", Static).update("")
+            return
+
+        summary = []
+        for slot, model in self.selected_models.items():
+            summary.append(f"[dim]{slot.title()}:[/dim] [cyan]{model.name}[/cyan]")
+
+        self.query_one("#model-summary", Static).update(" ".join(summary))
+
+    @on(Button.Pressed, "#btn-reset-models")
+    def reset_model_selection(self) -> None:
+        """Reset all model selections and start over."""
+        if not self.selected_sabha:
+            return
+
+        self.current_slot_idx = 0
+        self.selected_models = {}
+        self._update_model_step_title()
+        self._update_model_summary()
+        self.query_one("#btn-continue", Button).disabled = True
+
+    @on(Button.Pressed, "#tab-ollama")
+    def show_ollama(self) -> None:
+        self._switch_tab("ollama")
+
+    @on(Button.Pressed, "#tab-huggingface")
+    def show_huggingface(self) -> None:
+        self._switch_tab("huggingface")
+
+    @on(Button.Pressed, "#tab-lmstudio")
+    def show_lmstudio(self) -> None:
+        self._switch_tab("lmstudio")
+
+    @on(Input.Changed, "#model-search")
+    def on_search_changed(self, event: Input.Changed) -> None:
+        """Filter models based on search query."""
+        self._update_model_list(event.value)
+
+    def _switch_tab(self, source: str) -> None:
+        self.current_source = source
+
+        # Update tab styling
+        for btn in self.query(".model-tab"):
+            btn.remove_class("active")
+        self.query_one(f"#tab-{source}").add_class("active")
+
+        # Clear search and update list
+        search_input = self.query_one("#model-search", Input)
+        search_input.value = ""
+        self._update_model_list("")
+
+    def _update_model_list(self, search_query: str = "") -> None:
+        """Update model list with optional search filter."""
+        model_list = self.query_one("#model-list")
+        model_list.remove_children()
+
+        models = MODEL_CATALOG.get(self.current_source, [])
+
+        # Filter by search query
+        if search_query:
+            query = search_query.lower()
+            models = [
+                m for m in models
+                if query in m.name.lower()
+                or query in m.description.lower()
+                or query in m.distributor.lower()
+                or query in m.params.lower()
+                or (m.tags and any(query in tag for tag in m.tags))
+            ]
+
+        # Add filtered models
+        for model in models:
+            model_list.mount(ModelCard(model, classes="model-item"))
+
+        # Select first model if any
+        model_cards = list(self.query(ModelCard))
+        if model_cards:
+            model_cards[0].select()
+            self.selected_model = model_cards[0].model
+
+
+    @on(Button.Pressed, "#btn-skip")
+    def action_skip(self) -> None:
+        """Handle skip/abort - preserve initial config or use defaults."""
+        if self.initial_config:
+            # Re-setup scenario - abort and keep existing config
+            self.dismiss(self.initial_config)
+        else:
+            # First-run scenario - create default config
+            default_config = ParishadConfig(
+                sabha="laghu",
+                backend="ollama",  # Default to Ollama (matches CLI)
+                model="qwen2.5:1.5b",  # Small Ollama model
+                cwd=str(Path.cwd())
+            )
+            save_parishad_config(default_config)
+            self.dismiss(default_config)
+
+    @on(Button.Pressed, "#btn-continue")
+    def action_confirm(self) -> None:
+        """Handle confirm - download models then save config and return."""
+        # Strict concurrency check
+        if self.is_downloading:
+            self.notify("Setup already in progress. Please wait...", severity="warning")
+            return
+
+        if self.selected_sabha and len(self.selected_models) >= len(self.selected_sabha.model_slots):
+            # Create ParishadConfig from selections
+            # Store source (huggingface/ollama/lmstudio) for backend mapping
+            primary_model = list(self.selected_models.values())[0].shortcut if self.selected_models else "qwen2.5:1.5b"
+
+            new_config = ParishadConfig(
+                sabha=self.selected_sabha.id,
+                backend=self.current_source,  # Source: huggingface/ollama/lmstudio
+                model=primary_model,  # Model ID for ModelManager
+                cwd=str(Path.cwd())
+            )
+
+            # Download models before saving config
+            self.run_worker(self._async_download_models(new_config), exclusive=True)
+
+    async def _async_download_models(self, config: ParishadConfig) -> None:
+        """Download selected models asynchronously using CLI's ModelManager, then save config."""
+        from parishad.models.downloader import ModelManager
+        import asyncio
+
+        manager = ModelManager()
+        loop = asyncio.get_event_loop()
+
+        # Collect all unique model shortcuts that need checking
+        models_to_download = []
+        for slot, model_info in self.selected_models.items():
+            # Check if model already exists using get_model_path (correct API)
+            model_path = manager.get_model_path(model_info.shortcut)
+            if model_path is None or not model_path.exists():
+                # Model not found or file missing, need to download
+                models_to_download.append(model_info)
+
+        # UI Feedback
+        btn = self.query_one("#btn-continue", Button)
+        pbar = self.query_one("#download-progress", ProgressBar)
+
+        original_label = str(btn.label)
+        btn.disabled = True
+        btn.label = "⏳ Setting up..."
+
+        # Show progress bar
+        pbar.display = True
+        pbar.update(total=100, progress=0)
+
+        self.is_downloading = True  # Set lock
+
+        # Debug: Log what we're doing
+        db_path = Path.home() / "parishad_debug.log"
+        with open(db_path, "a") as f:
+            f.write("\n=== SETUP CONTINUE CLICKED ===\n")
+            f.write(f"Selected models: {list(self.selected_models.keys())}\n")
+            f.write(f"Models to download: {[m.name for m in models_to_download]}\n")
+
+        try:
+            download_errors = []
+
+            # If no models need downloading, just save config and exit
+            if not models_to_download:
+                with open(db_path, "a") as f:
+                    f.write("All models installed - saving config and exiting\n")
+                self.notify("✓ All models already available!", severity="information", timeout=3)
+                if save_parishad_config(config):
+                    with open(db_path, "a") as f:
+                        f.write("Config saved successfully, dismissing\n")
+                    self.dismiss(config)
+                else:
+                    self.notify("Failed to save configuration", severity="error", timeout=5)
+                return  # Exit early if nothing to download
+
+            # Download each missing model
+            for i, model_info in enumerate(models_to_download):
+                self.notify(f"Downloading {model_info.name}...\nPlease wait (this may take a while)", timeout=10)
+
+                # Reset progress for new file
+                pbar.update(total=100, progress=0)
+
+                # This is EXACTLY what CLI does in main.py:download_model
+                def _do_download():
+                    """Execute download in thread pool (production-safe)."""
+                    # DEBUG LOGGING
+                    db_path = Path.home() / "parishad_debug.log"
+                    with open(db_path, "a") as f:
+                        f.write(f"DEBUG: Starting download for {model_info.name} from {model_info.source}\n")
+
+                    def _progress(p):
+                        """Track download progress and update TUI safely."""
+                        if p.total_bytes > 0:
+                            # Calculate percentage
+                            percent = (p.downloaded_bytes / p.total_bytes) * 100
+                            # Update TUI from thread
+                            self.app.call_from_thread(pbar.update, progress=percent)
+
+                    try:
+                        res = manager.download(
+                            model_spec=model_info.shortcut,
+                            source="huggingface" if model_info.source == "huggingface" else model_info.source,
+                            progress_callback=_progress
+                        )
+                        with open(db_path, "a") as f:
+                            f.write(f"DEBUG: Download success: {res}\n")
+                    except Exception as e:
+                        with open(db_path, "a") as f:
+                            f.write(f"DEBUG: Download FAILED: {e}\n")
+                        import traceback
+                        with open(db_path, "a") as f:
+                            traceback.print_exc(file=f)
+                        return False  # Explicit failure return
+
+                    return True  # Explicit success return
+
+                # Run in thread pool to avoid blocking TUI
+                success = await loop.run_in_executor(None, _do_download)
+
+                if not success:
+                    download_errors.append(f"{model_info.name} failed (check ~/parishad_debug.log)")
+
+            # Check results
+            if download_errors:
+                error_msg = "\n".join(download_errors)
+                self.notify(f"Download errors occurred:\n{error_msg}", severity="error", timeout=10)
+            else:
+                pbar.update(total=100, progress=100)  # Show full completion
+                self.notify("Setup complete! Saving configuration...", timeout=5)
+                # Success - save config and proceed
+                if save_parishad_config(config):
+                    self.dismiss(config)
+                else:
+                    self.notify("Error saving configuration! Check permissions.", severity="error")
+
+        except Exception as e:
+            self.notify(f"Critical Error: {str(e)}", severity="error")
+            with open(Path.home() / "parishad_debug.log", "a") as f:
+                f.write(f"DEBUG: Critical Outer Exception: {e}\n")
+
+        finally:
+            # Always reset UI state and lock
+            btn.disabled = False
+            btn.label = original_label
+            pbar.display = False  # Hide progress bar
+            self.is_downloading = False
+
+    def _save_config(self, use_defaults: bool = False) -> None:
+        """Deprecated: Config is now saved via ParishadConfig.
+
+        This method is kept for backward compatibility but should not be used.
+        Use save_parishad_config() instead.
+        """
+        pass
+
+
+# =============================================================================
+# Sabha Progress Display
+# =============================================================================
+
+# Role metadata for display
+ROLE_INFO = {
+    "darbari": {"emoji": "🏛️", "name": "Darbari", "desc": "Analyzing query"},
+    "majumdar": {"emoji": "📋", "name": "Majumdar", "desc": "Creating plan"},
+    "sainik": {"emoji": "⚔️", "name": "Sainik", "desc": "Implementing"},
+    "prerak": {"emoji": "🔍", "name": "Prerak", "desc": "Checking"},
+    "raja": {"emoji": "👑", "name": "Raja", "desc": "Deciding"},
+}
+
+CORE_ROLES = ["darbari", "majumdar", "sainik", "prerak", "raja"]
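+
+# RoleProgressBar below renders these as one Rich-markup line; e.g. with
+# "darbari" complete and "majumdar" active, the rendered line would read:
+#   [green]🏛️[/green] [yellow]📋 Majumdar...[/yellow] [dim]⚔️[/dim] [dim]🔍[/dim] [dim]👑[/dim]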
+
+
+class RoleProgressBar(Static):
+    """Display Sabha role execution progress."""
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.active_role = None
+        self.completed_roles = []
+
+    def set_active(self, role_name: str) -> None:
+        """Set the currently active role."""
+        self.active_role = role_name
+        self._update_display()
+
+    def mark_complete(self, role_name: str) -> None:
+        """Mark a role as complete."""
+        if role_name not in self.completed_roles:
+            self.completed_roles.append(role_name)
+        self._update_display()
+
+    def reset(self) -> None:
+        """Reset progress."""
+        self.active_role = None
+        self.completed_roles = []
+        self._update_display()
+
+    def _update_display(self) -> None:
+        """Update the progress display."""
+        parts = []
+        for role in CORE_ROLES:
+            info = ROLE_INFO[role]
+            if role in self.completed_roles:
+                parts.append(f"[green]{info['emoji']}[/green]")
+            elif role == self.active_role:
+                parts.append(f"[yellow]{info['emoji']} {info['name']}...[/yellow]")
+            else:
+                parts.append(f"[dim]{info['emoji']}[/dim]")
+
+        self.update(" ".join(parts))
+
+
+
+
+class CommandSuggester(Suggester):
+    """Suggests slash commands and arguments for Parishad CLI."""
+
+    def __init__(self):
+        super().__init__(use_cache=False)
+        self.cached_models = None
+
+    async def get_suggestion(self, value: str) -> Optional[str]:
+        """Get best suggestion (first candidate)."""
+        candidates = await self.get_candidates(value)
+        return candidates[0] if candidates else None
+
+    async def get_candidates(self, value: str) -> List[str]:
+        """Get all matching candidates for the current input."""
+        candidates = []
+        # Early return only if not a command AND not a file reference
+        if not value.startswith("/") and "@" not in value:
+            return candidates
+
+        value_lower = value.lower()
+
+        # 1. Command arguments
+        if value_lower.startswith("/sabha "):
+            current_arg = value_lower[7:]
+            options = ["laghu", "madhyam", "maha"]
+            return [f"/sabha {opt}" for opt in options if opt.startswith(current_arg)]
+
+        if value_lower.startswith("/model "):
+            if self.cached_models is None:
+                # Fall back to an empty list if the manager fails
+                self.cached_models = []
+                try:
+                    from parishad.models.downloader import ModelManager
+                    # Get model names
+                    self.cached_models = [m.name for m in ModelManager().list_models()]
+                except Exception:
+                    pass
+
+            if self.cached_models:
+                current_arg = value[7:]
+                return [f"/model {m}" for m in self.cached_models if m.startswith(current_arg)]
+            return []
+
+        if value_lower.startswith("/assign "):
+            # Logic for assign args
+            # 1. Load models if needed
+            if self.cached_models is None:
+                self.cached_models = []
+                try:
+                    from parishad.models.downloader import ModelManager
+                    self.cached_models = [m.name for m in ModelManager().list_models()]
+                except Exception:
+                    pass
+
+            parts = value.split(" ")
+            current_token = parts[-1]
+            prefix_tokens = " ".join(parts[:-1])
+
+            suggestions = []
+            slots = ["big=", "mid=", "small=", "planner=", "judge="]
+
+            if "=" in current_token:
+                # Suggesting value for specific slot
+                # e.g. big=lla -> big=llama3
+                key, val_prefix = current_token.split("=", 1)
+
+                # Filter model matches
+                model_matches = [m for m in self.cached_models if m.startswith(val_prefix)]
+                for m in model_matches:
+                    suggestions.append(f"{prefix_tokens} {key}={m}")
+            else:
+                # Suggesting slot OR model (Smart Mode)
+                # 1. Slots
+                for s in slots:
+                    if s.startswith(current_token):
+                        suggestions.append(f"{prefix_tokens} {s}")
+
+                # 2. Models (Smart Mode)
+                model_matches = [m for m in self.cached_models if m.startswith(current_token)]
+                for m in model_matches:
+                    suggestions.append(f"{prefix_tokens} {m}")
+
+            return suggestions
+
+        # 2. @-file Autocomplete (Phase 13)
+        # Check if the *last token* starts with @
+        last_token = value.split(" ")[-1]
+        if last_token.startswith("@"):
+            prefix = value[: -len(last_token)]  # Everything before the token
+            partial_path = last_token[1:]  # Strip @
+
+            try:
+                # Resolve directory and search pattern
+                if "/" in partial_path:
+                    dir_part, file_part = partial_path.rsplit("/", 1)
+                    search_dir = Path.cwd() / dir_part
+                    glob_pattern = f"{file_part}*"
+                    display_dir = f"{dir_part}/"
+                else:
+                    search_dir = Path.cwd()
+                    glob_pattern = f"{partial_path}*"
+                    display_dir = ""
+
+                if search_dir.exists() and search_dir.is_dir():
+                    matches = []
+                    # List files and dirs
+                    for item in search_dir.glob(glob_pattern):
+                        # Skip hidden files unless explicitly typed "."
+                        if item.name.startswith(".") and not partial_path.startswith("."):
+                            continue
+
+                        # Append / to directories
+                        suffix = "/" if item.is_dir() else ""
+                        candidate = f"@{display_dir}{item.name}{suffix}"
+                        matches.append(candidate)
+
+                    # Sort: Directories first, then files
+                    matches.sort(key=lambda x: (not x.endswith("/"), x))
+
+                    # Limit to 15 suggestions to avoid clutter
+                    return [f"{prefix}{m}" for m in matches[:15]]
+
+            except Exception:
+                pass
+            return []
+
+        # 3. Top-level commands
+        commands = [
+            "/help", "/exit", "/clear", "/config", "/setup",
+            "/model", "/sabha", "/redownload", "/assign", "/scan"
+        ]
+        return [cmd for cmd in commands if cmd.startswith(value_lower)]
+
+
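+# Illustrative completions from CommandSuggester.get_candidates (assuming the
+# listed files exist under the current working directory):
+#   "/sab"      -> ["/sabha"]
+#   "/sabha la" -> ["/sabha laghu"]
+#   "@src/ma"   -> ["@src/main.py", ...]
+
+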
+class ShellInput(Input):
+    """Input widget with shell-like suggestion cycling (Up/Down) and Tab completion."""
+
+    BINDINGS = [
+        Binding("up", "cycle_suggestion(-1)", "Previous", show=False),
+        Binding("down", "cycle_suggestion(1)", "Next", show=False),
+        Binding("tab", "accept_suggestion", "Accept", show=False),
+    ]
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.cycle_index = -1
+        self.current_candidates = []
+        self.original_prefix = ""
+        self.is_cycling = False
+
+    def watch_value(self, value: str) -> None:
+        """Reset cycling when user types manually."""
+        if not self.is_cycling:
+            self.original_prefix = value
+            self.cycle_index = -1
+            self.current_candidates = []
+
+    async def action_cycle_suggestion(self, delta: int) -> None:
+        """Cycle through suggestions by updating the value directly."""
+        prefix = self.original_prefix
+        if not prefix:
+            return
+
+        # Refresh candidates if needed
+        if not self.current_candidates:
+            if hasattr(self.suggester, "get_candidates"):
+                self.current_candidates = await self.suggester.get_candidates(prefix)
+            else:
+                self.current_candidates = []
+
+        if not self.current_candidates:
+            return
+
+        # Advance index
+        if self.cycle_index == -1:
+            self.cycle_index = 0 if delta > 0 else len(self.current_candidates) - 1
+        else:
+            self.cycle_index = (self.cycle_index + delta) % len(self.current_candidates)
+
+        # Update value directly (Zsh style)
+        self.is_cycling = True
+        self.value = self.current_candidates[self.cycle_index]
+        self.cursor_position = len(self.value)
+        self.is_cycling = False
+
+    async def action_accept_suggestion(self) -> None:
+        """Accept current suggestion (Tab)."""
+        # If we are already cycling, the value is set, just move cursor
+        if self.cycle_index != -1:
+            self.cursor_position = len(self.value)
+            return
+
+        # Otherwise, try to fetch suggestion manually since we can't access self.suggestion
+        if self.suggester and self.value:
+            sug = await self.suggester.get_suggestion(self.value)
+            if sug:
+                self.value = sug
+                self.cursor_position = len(self.value)
+
+
+
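+# Typical ShellInput interaction: type "/mo", press Down to cycle to
+# "/model", press Tab to accept; typing any character resets the cycle via
+# watch_value (the is_cycling flag suppresses resets during cycling itself).
+
+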
+class ParishadApp(App):
+    """Parishad Code TUI Application."""
+
+    # Custom message for opening setup screen from worker thread
+    class OpenSetup(Message):
+        """Message to open setup screen from worker thread."""
+        pass
+
+    CSS = CSS
+    SCREENS = {"setup": SetupScreen}
+    BINDINGS = [
+        Binding("ctrl+c", "quit", "Exit", show=False),
+        Binding("ctrl+l", "clear", "Clear", show=False),
+    ]
+
+    def __init__(self, model: str = None, sabha: str = None, backend: str = None, cwd: str = "."):
+        super().__init__()
+        self.cwd = Path(cwd).resolve()
+        self.council = None
+        self.ctrl_c_pressed = False
+        self.download_cancel_event = None  # Track download cancellation
+        self.download_progress_line = None  # Track last progress line for updates
+        self._initializing = False  # Prevent concurrent initialization
+        self._processing_query = False  # Prevent concurrent query processing
+
+        # Load config from disk
+        self.config = load_parishad_config()
+
+        # Apply overrides from CLI if provided
+        if self.config:
+            self.model = model or self.config.model
+            self.backend = backend or self.config.backend
+            self.sabha = sabha or self.config.sabha
+        else:
+            # No config file - use CLI params or defaults
+            self.model = model or "llama3.2:3b"
+            self.backend = backend or "ollama"
+            self.sabha = sabha or "laghu"
+
+    def _load_config(self) -> dict:
+        """Deprecated: Use load_parishad_config() instead.
+
+        This method is kept for backward compatibility.
+        """
+        config = load_parishad_config()
+        if config:
+            return config.to_dict()
+        return {}
+
+    def compose(self) -> ComposeResult:
+        """Create child widgets."""
+        cwd_str = str(self.cwd)
+        if len(cwd_str) > 40:
+            cwd_str = "~" + cwd_str[-39:]
+
+        # Header section (fixed height)
+        yield Static(LOGO, id="logo", markup=True)
+        yield Static(
+            f"\n[dim]Tips: Ask questions, edit files, run commands. /help for more.[/dim]\n"
+            f"[dim]{self.model} · {cwd_str}[/dim]",
+            id="tips",
+            markup=True
+        )
+        yield RoleProgressBar(id="role-progress")
+
+        # Chat area (takes remaining space)
+        yield RichLog(id="chat-area", markup=True, wrap=True, auto_scroll=True, highlight=True)
+
+        # Input box (fixed at bottom)
+        yield Container(
+            Static("> ", id="prompt-prefix"),
+            ShellInput(placeholder="Type your message...", id="prompt-input", suggester=CommandSuggester()),
+            id="input-box"
+        )
+        yield Static("[dim]? for help · Ctrl+C to exit[/dim]", id="status")
+
+    def on_mount(self) -> None:
+        """Focus input on mount and handle startup flow."""
+        # Check if we need setup
+        # Requires setup if:
+        # 1. No config exists OR
+        # 2. setup_complete is False OR
+        # 3. Config exists but Sabha/Model not selected
+        needs_setup = False
+        if not self.config:
+            needs_setup = True
+        elif not self.config.setup_complete:
+            needs_setup = True
+        elif not self.config.sabha or not self.config.model:
+            needs_setup = True
+
+        if needs_setup:
+            # Show setup screen (Sabha selection + Model browser)
+            self.push_screen(SetupScreen(self.config), callback=self._on_setup_complete)
+        else:
+            # Config complete - go straight to chat
+            self._initialize_chat()
+
+    def on_parishad_app_open_setup(self, message: OpenSetup) -> None:
+        """Handle OpenSetup message - open setup screen."""
+        self.push_screen(SetupScreen(), callback=self._on_setup_complete)
+
+    def _on_setup_complete(self, config: Optional[ParishadConfig]) -> None:
+        """Callback when setup is completed or aborted."""
+        if config:
+            # Setup completed with new config
+            self.config = config
+            self.model = config.model
+            self.backend = config.backend
+            self.sabha = config.sabha
+            self.cwd = Path(config.cwd) if config.cwd else Path.cwd()
+
+            # Initialize chat with new config
+            self._initialize_chat()
+        else:
+            # Setup aborted with no previous config - should not happen
+            # (action_skip now always returns a config)
+            self.exit()
+
+    def _initialize_chat(self) -> None:
+        """Initialize chat interface after config is ready."""
+        self.query_one("#prompt-input", Input).focus()
+
+        # Prevent concurrent initialization
+        if self._initializing:
+            return
+
+        # Run model loading asynchronously to avoid freezing UI
+        self.run_worker(self._async_initialize_council(), exclusive=True)
+
+    async def _async_initialize_council(self) -> None:
+        """Async worker to initialize Sabha council without blocking UI."""
+        if self._initializing:
+            self.log_message("[yellow]Already initializing...[/yellow]\n")
+            return
+
+        self._initializing = True
+
+        try:
+            from parishad.orchestrator.engine import Parishad
+            from parishad.config.user_config import load_user_config
+            import asyncio
+
+            self.log_message("[cyan]🔄 Initializing Sabha council...[/cyan]\n")
+
+            # Load user config for profile (same as CLI run does)
+            user_cfg = load_user_config()
+            profile = user_cfg.default_profile
+            mode = user_cfg.default_mode
+
+            self.log_message(f"[dim] • Profile: {profile}[/dim]\n")
+            self.log_message(f"[dim] • Mode: {mode}[/dim]\n")
+
+            # Get pipeline config from Sabha selection
+            if self.config:
+                config_name = self.config.get_pipeline_config()
+                self.log_message(f"[dim] • Pipeline: {config_name}[/dim]\n")
+            else:
+                config_name = "core"  # Default fallback
+                self.log_message(f"[dim] • Pipeline: {config_name} (default)[/dim]\n")
+
+            self.log_message("[yellow]⏳ Loading models (this may take 30-60 seconds)...[/yellow]\n")
+
+            # Initialize Parishad exactly like CLI run does
+            # CRITICAL: Pass model_config_path=None so it uses profiles + models.yaml
+            loop = asyncio.get_event_loop()
+
+            self.log_message("[dim] • Creating Parishad engine...[/dim]\n")
+
+            # Add timeout to prevent indefinite freezing when backend is unavailable
+            try:
+                # Build user_forced_config from model_map
+                user_forced_config = {}
+                if self.config.model_map:
+                    # Initialize manager to resolve paths
+                    from parishad.models.downloader import ModelManager
+                    model_manager = ModelManager()
+
+                    msg_backend = self.config.backend or "ollama"
+
+                    for slot, model_id in self.config.model_map.items():
+                        # Default to current config backend
+                        current_backend = msg_backend
+                        model_file = None
+
+                        # Check if it's a known model to resolve backend/path
+                        model_info = model_manager.registry.get(model_id)
+                        if model_info:
+                            # Handle Enum comparison correctly
+                            source = model_info.source.value if hasattr(model_info.source, "value") else str(model_info.source)
+
+                            if source == "local":
+                                current_backend = "llama_cpp"
+                                model_file = str(model_info.path)
+                            elif source == "ollama":
+                                current_backend = "ollama"
+                            elif source == "mlx":
+                                current_backend = "mlx"
+                        else:
+                            # Fallback heuristics if not in registry
+                            if model_id.startswith("local:"):
+                                current_backend = "llama_cpp"
+                            elif model_id.startswith("ollama:") or ":" in model_id:
+                                current_backend = "ollama"
+
+                        user_forced_config[slot] = {
+                            "model_id": model_id,
+                            "backend_type": current_backend
+                        }
+                        if model_file:
+                            user_forced_config[slot]["model_file"] = model_file
+
+                self.council = await asyncio.wait_for(
+                    loop.run_in_executor(
+                        None,
+                        lambda: Parishad(
+                            config=config_name,
+                            model_config_path=None,  # Let engine use profiles + models.yaml
+                            profile=profile,
+                            pipeline_config_path=None,
+                            trace_dir=None,
+                            mock=False,
+                            stub=False,
+                            mode=mode,
+                            user_forced_config=user_forced_config or None,
+                            no_retry=False,
+                        )
+                    ),
+                    timeout=120.0  # 2 minute timeout for model loading
+                )
+            except asyncio.TimeoutError:
+                self.log_message(
+                    "[red]✗ Model loading timed out (120 seconds)[/red]\n"
+                    "[yellow]⚠ The backend may not be running or model download is stalled.[/yellow]\n"
+                    "[dim]Hints:[/dim]\n"
+                    "[dim] • Check if Ollama is running: ollama serve[/dim]\n"
+                    "[dim] • Verify model is downloaded: parishad models list[/dim]\n"
+                    "[dim] • Try /setup to reconfigure[/dim]\n"
+                )
+                self.council = None
+                return
+
+            if self.council:
+                self.log_message(
+                    f"[green]✅ Sabha council ready![/green]\n"
+                    f"[dim]Models loaded from profile '{profile}'[/dim]\n"
+                    f"[dim]You can now start asking questions.[/dim]\n"
+                )
+            else:
+                self.log_message("[red]✗ Council initialization returned None[/red]\n")
+
+        except Exception as e:
+            import traceback
+            tb = traceback.format_exc()
+            self.log_message(
+                f"[red]✗ Error loading Sabha council:[/red]\n"
+                f"[red]{type(e).__name__}: {e}[/red]\n"
+                f"[dim]{tb}[/dim]\n"
+            )
+            self.council = None
+        finally:
+            self._initializing = False
+
+    # DEPRECATED: TUI now uses same engine setup as CLI 'parishad run'
+    # This method is no longer called
+    # def _create_model_config_from_tui(self):
+    #     """Create ModelConfig from TUI ParishadConfig."""
+    #     ...
+
+    def _check_backend_availability(self) -> None:
+        """Check if the configured backend is available and show help if not."""
+        from parishad.models.backends import is_backend_available, get_available_backends
+
+        if not self.config:
+            return
+
+        # Map TUI backend to runner backend
+        backend_map = {
+            "ollama": "ollama",
+            "huggingface": "transformers",
+            "lmstudio": "openai",
+            "openai": "openai",
+            "local": "llama_cpp",
+            "llama_cpp": "llama_cpp",
+            "transformers": "transformers",
+            "mlx": "mlx",
+        }
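+        # e.g. a config created from the "lmstudio" tab resolves to the
+        # "openai" runner backend, since LM Studio serves an OpenAI-compatible API.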
+
+        backend_name = backend_map.get(self.config.backend.lower(), self.config.backend)
+
+        if not is_backend_available(backend_name):
+            available = get_available_backends()
+            self.log_message(
+                f"\n[yellow]⚠ Backend '{self.config.backend}' is not available![/yellow]\n\n"
+                f"[bold]Selected backend:[/bold] {self.config.backend}\n"
+                f"[bold]Model:[/bold] {self.config.model}\n\n"
+                f"[bold]Issue:[/bold] Required dependencies not installed.\n\n"
+                f"[bold]Available backends:[/bold] {', '.join(available)}\n\n"
+                f"[bold]To fix:[/bold]\n"
+            )
+
+            # Show installation instructions based on backend
+            if backend_name == "transformers":
+                self.log_message(
+                    " [cyan]pip install transformers torch[/cyan]\n"
+                    " (For GPU: pip install transformers torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118)\n"
+                )
+            elif backend_name == "ollama":
+                self.log_message(
+                    " 1. Install Ollama: [cyan]https://ollama.ai[/cyan]\n"
+                    " 2. Pull model: [cyan]ollama pull " + self.config.model + "[/cyan]\n"
+                )
+            elif backend_name == "llama_cpp":
+                self.log_message(
+                    " [cyan]pip install llama-cpp-python[/cyan]\n"
+                    " (For GPU: CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" pip install llama-cpp-python)\n"
+                )
+            elif backend_name == "openai":
+                self.log_message(
+                    " [cyan]pip install openai[/cyan]\n"
+                    " Set API key: [cyan]export OPENAI_API_KEY=your_key[/cyan]\n"
+                )
+            elif backend_name == "mlx":
+                self.log_message(
+                    " [cyan]pip install mlx-lm[/cyan]\n"
+                    " (Only works on Apple Silicon M1/M2/M3/M4)\n"
+                )
+
+            self.log_message(
+                "\n[dim]Or run [cyan]/setup[/cyan] to choose a different backend.[/dim]\n"
+            )
+            return
+
+        # Backend is available - show success message
+        self.log_message(
+            f"[dim]Sabha council initialized with {self.config.backend} backend[/dim]"
+        )
+
+    def _check_model_availability(self) -> None:
+        """DEPRECATED: Use _check_backend_availability instead."""
+        import subprocess
+        try:
+            result = subprocess.run(
+                ["ollama", "list"],
+                capture_output=True,
+                text=True,
+                timeout=5
+            )
+            installed_models = result.stdout.lower()
+
+            # Check if our model is in the list
+            model_name = self.model.split(":")[0].lower()  # e.g., "llama3.2" from "llama3.2:3b"
+            if model_name not in installed_models:
+                self.log_message(
+                    f"\n[yellow]⚠ Model '{self.model}' not installed![/yellow]\n"
+                    f"[dim]Run this command in another terminal to install it:[/dim]\n"
+                    f"[bold cyan] ollama pull {self.model}[/bold cyan]\n"
+                    f"[dim]Then restart parishad.[/dim]\n"
+                )
+        except FileNotFoundError:
+            self.log_message(
+                "\n[yellow]⚠ Ollama not found![/yellow]\n"
+                "[dim]Install Ollama from:[/dim] [bold cyan]https://ollama.ai[/bold cyan]\n"
+            )
+        except Exception:
+            pass  # Silent fail for other errors
+
+
+    def log_message(self, message: str) -> None:
+        """Add message to chat log."""
+        chat = self.query_one("#chat-area", RichLog)
+        chat.write(message)
+        chat.scroll_end()
+
+    @on(Input.Submitted)
+    def handle_input(self, event: Input.Submitted) -> None:
+        """Handle user input submission with parsing layer."""
+        raw_input = event.value.strip()
+        input_widget = event.input
+        input_widget.value = ""
+
+        if not raw_input:
+            return
+
+        # Parse input
+        parsed = parse_input(raw_input)
+
+        # Show user message (with original input for transparency)
+        self.log_message(f"\n[bold]> {parsed.raw}[/bold]")
+
+        # Handle commands
+        if parsed.is_command:
+            self.handle_command(parsed)
+            return
+
+        # Load referenced files
+        loaded_files = []
+        if parsed.tools:
+            self.log_message("[dim]📎 Loading files...[/dim]")
+            for tool in parsed.tools:
+                if tool["type"] == "file":
+                    loaded = load_file(tool["path"], Path(self.cwd))
+                    loaded_files.append(loaded)
+
+                    if loaded.exists and loaded.content:
+                        size_kb = loaded.size_bytes // 1024
+                        self.log_message(f" [green]✓[/green] {loaded.path} ({size_kb}KB)")
+                        if loaded.error:  # Truncation
+                            self.log_message(f" [yellow]⚠[/yellow] {loaded.error}")
+                    else:
+                        self.log_message(f" [red]✗[/red] {loaded.error}")
+
+        # Show active flags
+        if parsed.flags:
+            flag_names = ", ".join(f"#{k}" for k in parsed.flags.keys())
+            self.log_message(f"[dim]🏴 Active flags: {flag_names}[/dim]")
+
+        # Build augmented prompt
+        final_prompt = build_augmented_prompt(
+            parsed.user_query,
+            loaded_files,
+            parsed.flags
+        )
+
+        if not final_prompt.strip():
+            self.log_message("[yellow]⚠ Empty query after parsing[/yellow]")
+            return
+
+        # Process with Sabha council
+        progress = self.query_one("#role-progress", RoleProgressBar)
+        progress.reset()
+        self.log_message("[dim] ⎿ सभा विचार-विमर्श...[/dim]")  # Sabha deliberating in Hindi
+
+        if not self.council:
+            self.log_message("\n[red]✗ Sabha council not loaded![/red]")
+            self.log_message("[yellow]⚠ The model failed to initialize. Check the error messages above.[/yellow]")
+            self.log_message("[dim]Hint: Try running /setup to reconfigure, or check models.yaml profile.[/dim]\n")
+            return
+
+        # Prevent concurrent query processing
+        if self._processing_query:
+            self.log_message("[yellow]⚠ Already processing a query, please wait...[/yellow]")
+            return
+
+        # Run Sabha execution asynchronously to prevent UI freezing
+        self.run_worker(self._async_run_sabha(final_prompt, progress), exclusive=True)
+
+    async def _async_run_sabha(self, query: str, progress: RoleProgressBar) -> None:
+        """Execute Sabha council asynchronously to prevent UI freezing."""
+        if self._processing_query:
+            return
+
+        self._processing_query = True
+
+        import asyncio
+
+        try:
+            # Run the Sabha council pipeline with augmented prompt in thread pool
+            # Add timeout to prevent indefinite freeze if model doesn't respond
+            loop = asyncio.get_event_loop()
+            try:
+                trace = await asyncio.wait_for(
+                    loop.run_in_executor(None, self.council.run, query),
+                    timeout=300.0  # 5 minute timeout for query execution
+                )
+            except asyncio.TimeoutError:
+                self.log_message(
+                    "\n[red]✗ Query execution timed out (5 minutes)[/red]\n"
+                    "[yellow]⚠ The model may be stuck or the backend is unresponsive.[/yellow]\n"
+                    "[dim]Hints:[/dim]\n"
+                    "[dim] • Check if your model backend is still running[/dim]\n"
+                    "[dim] • Try a simpler query[/dim]\n"
+                    "[dim] • Restart with: /setup[/dim]\n"
+                )
+                return
+
+            # Update progress bar based on trace
+            for role_output in trace.roles:
+                role_name = role_output.role.lower()
+                progress.mark_complete(role_name)
+
+            # Display role activity summary (collapsible style)
+            self.log_message(f"\n[dim]━━━ Sabha Activity ({len(trace.roles)} roles, {trace.total_tokens} tokens) ━━━[/dim]")
+
+            for role_output in trace.roles:
+                role_name = role_output.role.lower()
+                info = ROLE_INFO.get(role_name, {"emoji": "❓", "name": role_name.title()})
+                status_icon = "[green]✓[/green]" if role_output.status == "success" else "[red]✗[/red]"
+
+                # Brief summary of what the role did
+                summary = ""
+                if role_name == "darbari" and role_output.core_output:
+                    task_type = role_output.core_output.get("task_type", "unknown")
+                    summary = f"→ Task: {task_type}"
+                elif role_name == "majumdar" and role_output.core_output:
+                    steps = role_output.core_output.get("steps", [])
+                    summary = f"→ {len(steps)} step plan"
+                elif role_name == "prerak" and role_output.core_output:
+                    flags = role_output.core_output.get("flags", [])
+                    if not flags:
+                        summary = "→ No issues"
+                    else:
+                        summary = f"→ {len(flags)} issue(s)"
+                elif role_name == "raja" and role_output.core_output:
+                    conf = role_output.core_output.get("confidence", 0)
+                    summary = f"→ Confidence: {int(conf*100)}%"
+
+                # Show model used
+                model_str = ""
+                if role_output.metadata and role_output.metadata.model_id:
+                    mid = role_output.metadata.model_id
+                    # Strip path
+                    if "/" in mid:
+                        mid = mid.split("/")[-1]
+                    # Strip extension (optional but cleaner)
+                    if mid.endswith(".gguf"):
+                        mid = mid[:-5]
+                    model_str = f"[dim]({mid})[/dim]"
+
+                if role_output.status == "error":
+                    err_msg = role_output.error or "Unknown error"
+                    # Show full error
+                    summary = f"[red]{err_msg}[/red]"
+
+                self.log_message(f" {info['emoji']} {info['name']} {model_str}: {status_icon} {summary}")
+
+            self.log_message("[dim]━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[/dim]\n")
+
+            # Display the final answer from Raja
+            if trace.final_answer:
+                answer = trace.final_answer.final_answer
+                self.log_message(f"\n[bold]👑 Raja's Answer:[/bold]\n{answer}\n")
+            elif trace.error:
+                self.log_message(f"\n[red]Error: {trace.error}[/red]")
+            else:
+                self.log_message("\n[yellow]No answer generated[/yellow]")
+
+        except Exception as e:
+            import traceback
+            tb = traceback.format_exc()
+            self.log_message(f"\n[red]Error ({type(e).__name__}): {e}[/red]\n[dim]{tb[:500]}...[/dim]")
+        finally:
+            # Always reset processing flag
+            self._processing_query = False
+
+    def handle_command(self, parsed: ParsedInput) -> None:
+        """Handle slash commands with ParsedInput."""
+        cmd = parsed.command_name
+        args = parsed.command_args
+
+        if cmd in ("exit", "quit", "q"):
+            self._cmd_exit()
+        elif cmd == "help" or cmd == "?":
+            self._cmd_help()
+        elif cmd == "clear":
+            self._cmd_clear()
+        elif cmd == "config":
+            self._cmd_config()
+        elif cmd in ("setup", "settings"):
+            self._cmd_setup()
+        elif cmd == "model":
+            self._cmd_model(args)
+        elif cmd == "sabha":
+            self._cmd_sabha(args)
+        elif cmd == "assign":
+            self._cmd_assign(args)
+        elif cmd == "redownload":
+            self._cmd_redownload()
+        elif cmd == "scan":
+            self._cmd_scan()
+        elif cmd == "save":
+            self._cmd_save()
+        else:
+            self.log_message(
+                f"[yellow]Unknown command: /{cmd}[/yellow]\n"
+                f"[dim]Type /help for available commands[/dim]"
+            )
+
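+    # Dispatch sketch, assuming parse_input turns "/model llama3.2:3b" into
+    # command_name "model" with command_args ["llama3.2:3b"]; handle_command
+    # then routes it to _cmd_model(["llama3.2:3b"]).
+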
+    def _cmd_exit(self) -> None:
+        """Exit the TUI."""
+        self.exit()
+
+    def _cmd_help(self) -> None:
+        """Show TUI help."""
+        self.log_message(
+            "\n[bold cyan]═══ Parishad TUI Help ═══[/bold cyan]\n\n"
+            "[bold]Commands:[/bold]\n"
+            " [cyan]/help[/cyan] Show this help\n"
+            " [cyan]/exit[/cyan] Exit the Parishad TUI\n"
+            " [cyan]/clear[/cyan] Clear the chat area\n"
+            " [cyan]/config[/cyan] Show current Sabha/mode/backend\n"
+            " [cyan]/setup[/cyan] Re-configure Sabha/backend/model\n"
+            " [cyan]/model[/cyan] Show or change the current model\n"
+            " [cyan]/redownload[/cyan] Re-download current model (if deleted/corrupted)\n"
+            " [cyan]/scan[/cyan] Scan disk for new GGUF models\n\n"
+            "[bold]Tools:[/bold]\n"
+            " [cyan]@file.py[/cyan] Reference a file (contents will be included)\n"
+            " [cyan]@\"path with spaces.txt\"[/cyan] Reference files with spaces (use quotes)\n\n"
+            "[bold]Flags:[/bold]\n"
+            " [cyan]#idk[/cyan] I don't know - prefer abstaining to guessing\n"
+            " [cyan]#safe[/cyan] Safe mode - conservative, no speculation\n"
+            " [cyan]#noguess[/cyan] Similar to #safe - avoid assumptions\n\n"
+            "[bold]Examples:[/bold]\n"
+            " [dim]> @main.py explain this code[/dim]\n"
+            " [dim]> what is quantum entanglement #idk[/dim]\n"
+            " [dim]> @config.yaml @README.md summarize these files[/dim]\n"
+        )
+
    def _cmd_clear(self) -> None:
        """Clear chat area."""
        self.query_one("#chat-area", RichLog).clear()
        self.log_message("[dim]Chat cleared[/dim]")

    def _cmd_config(self) -> None:
        """Show current configuration."""
        if not self.config:
            self.log_message("[yellow]No configuration loaded. Please run /setup.[/yellow]")
            return

        # Get dynamic config info
        from parishad.config.modes import get_mode_config
        try:
            mode_data = get_mode_config(self.config.sabha)
            sabha_display = f"{mode_data.sabha_name} ({mode_data.description})"
        except ValueError:
            sabha_display = f"{self.config.sabha} (Unknown Configuration)"

        mode = self.config.get_mode()
        pipeline = self.config.get_pipeline_config()

        # Get system profile and model directory
        from parishad.models.downloader import ModelManager, get_default_model_dir
        from parishad.config.user_config import load_user_config

        try:
            user_cfg = load_user_config()
            profile = user_cfg.default_profile
        except Exception:
            profile = "minimal_council"

        model_dir = get_default_model_dir()

        # Highlight current model
        current_model_display = f"[green]{self.model}[/green]"

        config_text = (
            f"\n[bold cyan]═══ Current Configuration ═══[/bold cyan]\n\n"
            f"[bold]Sabha Council:[/bold]\n"
            f"  Sabha: {sabha_display}\n"
            f"  Mode: {mode}\n"
            f"  Pipeline: {pipeline}\n\n"
            f"[bold]Model Backend:[/bold]\n"
            f"  Profile: [cyan]{profile}[/cyan]\n"
            f"  Current Model: {current_model_display}\n"
            f"  Model Dir: {model_dir}\n\n"
        )

        # Show downloaded models (deduplicated)
        try:
            manager = ModelManager()
            all_models = manager.list_models()

            # Deduplicate by name
            unique_models = {}
            for m in all_models:
                if m.name not in unique_models:
                    unique_models[m.name] = m

            models = list(unique_models.values())

            if models:
                config_text += "[bold]Downloaded Models:[/bold]\n"
                for model in models[:10]:  # Show first 10
                    marker = "★ " if model.name == self.model else "• "
                    style = "[bold green]" if model.name == self.model else ""
                    end_style = "[/bold green]" if model.name == self.model else ""
                    config_text += f"  {marker}{style}{model.name:30} [{model.format.value:12}] {model.size_human}{end_style}\n"
                if len(models) > 10:
                    config_text += f"  [dim]... and {len(models) - 10} more[/dim]\n"
            else:
                config_text += "[yellow]No models downloaded yet.[/yellow]\n"
                config_text += "[dim]Use /setup to download models.[/dim]\n"
        except Exception as e:
            config_text += f"[yellow]Could not list models: {e}[/yellow]\n"

        config_text += (
            f"\n[bold]Working Directory:[/bold]\n"
            f"  {self.cwd}\n\n"
            f"[dim]Type /help for available commands.[/dim]\n"
        )

        self.log_message(config_text)

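    # Note on the dedup loop in _cmd_config: building unique_models with an
    # explicit loop keeps the FIRST registry entry seen per name. The shorter
    # comprehension {m.name: m for m in all_models} would keep the LAST one,
    # so the loop is the safer choice if earlier entries are the preferred
    # copies (an assumption about registry ordering, not something this file
    # guarantees).
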
    def _cmd_setup(self) -> None:
        """Re-run setup to change configuration."""
        self.log_message("[dim]Opening setup...[/dim]")
        self.push_screen(SetupScreen(initial_config=self.config), callback=self._on_reconfig_complete)

    def _on_reconfig_complete(self, config: Optional[ParishadConfig]) -> None:
        """Callback when re-configuration is completed or aborted."""
        if config and config != self.config:
            # Config changed - reload
            self.config = config
            self.model = config.model
            self.backend = config.backend
            self.sabha = config.sabha
            self.cwd = Path(config.cwd) if config.cwd else Path.cwd()

            self.log_message("[green]✓ Configuration updated. Reloading Sabha...[/green]")
            self._initialize_chat()
        else:
            # Aborted or no change
            self.log_message("[dim]Setup cancelled - keeping current configuration[/dim]")

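    # The pair above uses Textual's screen-result pattern: push_screen(screen,
    # callback) registers the callback, and the screen hands a value back by
    # calling self.dismiss(result) (here presumably a ParishadConfig, or None
    # on cancel). A minimal sketch of the same pattern, assuming a Screen
    # subclass with a typed result:
    #
    #   from textual.screen import Screen
    #   from textual.widgets import Button
    #
    #   class ConfirmScreen(Screen[bool]):
    #       def on_button_pressed(self, event: Button.Pressed) -> None:
    #           self.dismiss(event.button.id == "yes")
    #
    #   self.push_screen(ConfirmScreen(), callback=self._on_confirmed)
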
    def _cmd_model(self, args: List[str]) -> None:
        """Show or change model."""
        if args:
            new_model = args[0]
            # Check if the model changed
            if self.model != new_model:
                self.model = new_model
                # Update config if it exists
                if self.config:
                    self.config.model = new_model
                    try:
                        save_parishad_config(self.config)
                        self.log_message(f"[green]✓ Model changed to: {self.model}[/green]")
                        # Trigger re-initialization if needed
                        # self._initialize_chat()
                    except Exception as e:
                        self.log_message(f"[red]Failed to save config: {e}[/red]")
                else:
                    self.log_message(f"[dim]Model changed to: {self.model} (runtime only)[/dim]")
            else:
                self.log_message(f"[dim]Model is already: {self.model}[/dim]")
        else:
            self.log_message(f"[dim]Current model: {self.model}[/dim]")

    def _cmd_scan(self) -> None:
        """Scan for models on disk."""
        from parishad.models.downloader import ModelManager
        try:
            manager = ModelManager()
            self.log_message("[dim]Scanning for models...[/dim]")

            # This updates the registry (models.json)
            new_models = manager.scan_for_models()

            # Also read the full list of valid models
            all_models = manager.list_models()

            msg = "[green]✓ Scan complete.[/green]\n\n"
            if new_models:
                msg += f"[bold]Found {len(new_models)} new models:[/bold]\n"
                for m in new_models:
                    msg += f"  • {m.name}\n"
            else:
                msg += "[dim]No new models found.[/dim]\n"

            msg += f"\n[dim]Total models available: {len(all_models)}[/dim]"
            self.log_message(msg)

        except Exception as e:
            self.log_message(f"[red]Error scanning models: {e}[/red]")

    def _cmd_save(self) -> None:
        """Manually save configuration."""
        if self.config:
            try:
                if save_parishad_config(self.config):
                    self.log_message("[green]✓ Configuration saved to disk.[/green]")
                else:
                    self.log_message("[red]Failed to save configuration.[/red]")
            except Exception as e:
                self.log_message(f"[red]Error saving configuration: {e}[/red]")
        else:
            self.log_message("[yellow]No configuration to save.[/yellow]")

    def _cmd_sabha(self, args: List[str]) -> None:
        """Switch active Sabha council."""
        valid_sabhas = ["laghu", "madhyam", "maha"]

        if not args:
            current = self.sabha or "unknown"
            self.log_message(f"[dim]Current Sabha: {current}[/dim]")
            self.log_message(f"[dim]Usage: /sabha [{'|'.join(valid_sabhas)}][/dim]")
            return

        new_sabha = args[0].lower()

        # Handle aliases
        aliases = {"fast": "laghu", "core": "madhyam", "balanced": "madhyam", "extended": "maha", "thorough": "maha"}
        new_sabha = aliases.get(new_sabha, new_sabha)

        if new_sabha not in valid_sabhas:
            self.log_message(f"[red]Invalid Sabha: {new_sabha}[/red]")
            self.log_message(f"[dim]Valid options: {', '.join(valid_sabhas)}[/dim]")
            return

        if new_sabha == self.sabha:
            self.log_message(f"[yellow]Already using {new_sabha}[/yellow]")
            return

        self.log_message(f"[cyan]Switching to {new_sabha}...[/cyan]")

        # Update config
        try:
            self.sabha = new_sabha
            if self.config:
                self.config.sabha = new_sabha
                save_parishad_config(self.config)

            # Re-initialize
            self._initialize_chat()

        except Exception as e:
            self.log_message(f"[red]Error switching Sabha: {e}[/red]")

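    # Usage examples for /sabha (aliases resolve before validation):
    #   /sabha            -> shows the current Sabha and usage
    #   /sabha fast       -> switches to "laghu"
    #   /sabha balanced   -> switches to "madhyam" (so does "core")
    #   /sabha extended   -> switches to "maha" (so does "thorough")
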
    def _smart_assign_models(self, models: List[str]) -> None:
        """Smartly assign models to slots based on size."""
        from parishad.models.downloader import ModelManager
        try:
            manager = ModelManager()
            all_models = manager.list_models()

            # Find matching model objects
            selected = []
            for name in models:
                # Find the best match
                match = None
                for m in all_models:
                    if m.name == name:
                        match = m
                        break
                # Fall back to a partial match if there is no exact match
                if not match:
                    for m in all_models:
                        if name in m.name:
                            match = m
                            break

                if match:
                    selected.append(match)
                else:
                    self.log_message(f"[yellow]Warning: Model '{name}' not found.[/yellow]")

            if not selected:
                self.log_message("[red]No valid models found for assignment.[/red]")
                return

            # Sort by size (descending)
            selected.sort(key=lambda x: x.size_bytes, reverse=True)

            updates = {}
            count = len(selected)

            if count == 1:
                # One model -> assign to BIG (primary);
                # leave the other slots at their default/previous values
                updates = {"big": selected[0].name}

            elif count == 2:
                # Two models -> big (largest), small (smallest)
                updates = {
                    "big": selected[0].name,
                    "small": selected[1].name
                }

            else:
                # Three+ models -> big, mid, small
                mid_idx = count // 2
                updates = {
                    "big": selected[0].name,
                    "mid": selected[mid_idx].name,
                    "small": selected[-1].name
                }

            if not self.config.model_map:
                self.config.model_map = {}

            self.config.model_map.update(updates)
            save_parishad_config(self.config)

            # Formatted log
            msg = "[green]Smartly assigned models:[/green]\n"
            for slot, model in updates.items():
                msg += f"  • {slot.upper():5}: {model}\n"
            self.log_message(msg)

            # Auto-reload
            self.log_message("[cyan]Reloading council with new assignments...[/cyan]")
            self._initialize_chat()

        except Exception as e:
            self.log_message(f"[red]Smart assignment failed: {e}[/red]")

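    # Worked example for the size-based slot mapping above (after the
    # descending sort):
    #   4 models [70B, 32B, 13B, 7B] -> mid_idx = 4 // 2 = 2, so
    #     big = 70B, mid = 13B, small = 7B (the 32B model goes unassigned)
    #   2 models [13B, 7B]           -> big = 13B, small = 7B
    #   1 model  [7B]                -> big = 7B; mid/small keep prior values
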
    def _cmd_assign(self, args: List[str]) -> None:
        """Assign models to slots (Explicit or Smart)."""
        args_str = " ".join(args) if args else ""
        if not args_str:
            self.log_message("[yellow]Usage: /assign [big=model]... or /assign [model1] [model2]...[/yellow]")
            return

        # Check for explicit assignment
        if "=" in args_str:
            updates = {}
            parts = args_str.split()
            for part in parts:
                if "=" in part:
                    k, v = part.split("=", 1)
                    if k in ["big", "mid", "small", "planner", "judge"]:
                        updates[k] = v
                    else:
                        self.log_message(f"[red]Unknown slot: {k}[/red]")

            if updates:
                if not self.config.model_map:
                    self.config.model_map = {}
                self.config.model_map.update(updates)
                save_parishad_config(self.config)
                self.log_message(f"[green]Assigned: {updates}[/green]")
                self.log_message("[cyan]Reloading council...[/cyan]")
                self._initialize_chat()
        else:
            # Smart Assignment
            self._smart_assign_models(args)

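    # Usage examples for /assign (model names here are illustrative):
    #   /assign big=llama3-70b small=phi3-mini   # explicit slot assignment
    #   /assign judge=qwen2.5                    # slots: big, mid, small, planner, judge
    #   /assign llama3-70b qwen2.5 phi3-mini     # smart: sorted by size into big/mid/small
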
    def _cmd_redownload(self) -> None:
        """Force re-download of current model."""
        if not self.config:
            self.log_message("[yellow]No model configured. Use /setup first.[/yellow]")
            return

        from parishad.models.downloader import ModelManager

        source = self.config.backend
        model_id = self.config.model

        self.log_message(
            f"[yellow]Re-downloading {model_id} from {source}...[/yellow]\n"
            f"[dim]This will delete the existing model file and download fresh.[/dim]\n"
        )

        try:
            manager = ModelManager()

            # Remove the existing model from the registry
            if model_id in manager.registry._models:
                old_path = manager.registry._models[model_id].path
                if old_path.exists():
                    old_path.unlink()
                    self.log_message(f"[dim]Deleted old model file: {old_path}[/dim]\n")
                del manager.registry._models[model_id]
                manager.registry._save_registry()

            # Trigger re-initialization, which will download the model again
            self.log_message("[cyan]Starting download...[/cyan]\n")
            self._initialize_chat()

        except Exception as e:
            self.log_message(f"[red]Error during re-download: {e}[/red]\n")

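    # The block above reaches into the registry's private state
    # (manager.registry._models / _save_registry()). A hypothetical public
    # helper on the manager could hide that detail (sketch only, not part of
    # the released API; built from the same private calls used above):
    #
    #   def remove_model(self, model_id: str) -> bool:
    #       """Delete a model's file and drop it from the registry."""
    #       entry = self.registry._models.pop(model_id, None)
    #       if entry is None:
    #           return False
    #       entry.path.unlink(missing_ok=True)
    #       self.registry._save_registry()
    #       return True
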
    def action_quit(self) -> None:
        """Handle Ctrl+C - cancel download if in progress, or require double press to exit."""
        # If a download is in progress, cancel it
        if self.download_cancel_event and not self.download_cancel_event.is_set():
            self.download_cancel_event.set()
            self.log_message("\n[yellow]Download cancelled. Press Ctrl+C again to exit.[/yellow]\n")
            self.download_cancel_event = None
            return

        # Otherwise, require a double press to exit
        if self.ctrl_c_pressed:
            self.exit()
        else:
            self.ctrl_c_pressed = True
            # Show in status bar instead of chat
            self.query_one("#status", Static).update("[yellow]↳ Press Ctrl+C again to exit[/yellow]")
            # Reset after 2 seconds
            self.set_timer(2.0, self.reset_ctrl_c)

    def reset_ctrl_c(self) -> None:
        """Reset Ctrl+C state."""
        self.ctrl_c_pressed = False
        # Restore status bar
        self.query_one("#status", Static).update("[dim]? for help · Ctrl+C to exit[/dim]")

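    # The two methods above implement the common double-press-to-quit pattern:
    # the first Ctrl+C only arms a flag and schedules reset_ctrl_c via
    # set_timer(2.0, ...), so a second press within two seconds exits while a
    # stray press is forgiven. The same idea outside Textual, as a sketch:
    #
    #   import signal, sys, threading
    #   armed = False
    #
    #   def _disarm():
    #       global armed
    #       armed = False
    #
    #   def _on_sigint(signum, frame):
    #       global armed
    #       if armed:
    #           sys.exit(0)
    #       armed = True
    #       print("Press Ctrl+C again to exit")
    #       threading.Timer(2.0, _disarm).start()
    #
    #   signal.signal(signal.SIGINT, _on_sigint)
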
    def action_clear(self) -> None:
        """Clear chat area."""
        self.query_one("#chat-area", RichLog).clear()


def run_code_cli(
    backend: str = "ollama",
    model: str = "llama3.2:3b",
    cwd: Optional[str] = None,
    sabha: Optional[str] = None,  # Sabha ID: "laghu"/"madhyam"/"maha"
    mode: Optional[str] = None,   # Mode key: "fast"/"balanced"/"thorough"
):
    """
    Run Parishad Code TUI.

    Args:
        backend: Backend to use (ollama, lmstudio, etc.)
        model: Model name/ID
        cwd: Working directory
        sabha: Sabha ID to use (if specified, overrides config)
        mode: Mode key to use (converted to sabha internally)
    """
    import platform
    import logging

    # Configure logging to a file for debugging
    log_file = os.path.expanduser("~/parishad_debug.log")
    logging.basicConfig(
        filename=log_file,
        level=logging.DEBUG,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        filemode='w'
    )
    logging.info("Parishad CLI starting...")

    # Windows-specific terminal fixes
    if platform.system() == "Windows":
        # Enable ANSI escape sequences on Windows
        try:
            import ctypes
            kernel32 = ctypes.windll.kernel32
            # -11 is STD_OUTPUT_HANDLE; 7 = ENABLE_PROCESSED_OUTPUT |
            # ENABLE_WRAP_AT_EOL_OUTPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING
            kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
        except Exception:
            pass

        # Force UTF-8 encoding for the Windows console
        if sys.stdout.encoding != 'utf-8':
            try:
                if hasattr(sys.stdout, 'reconfigure'):
                    sys.stdout.reconfigure(encoding='utf-8', errors='replace')
                if hasattr(sys.stderr, 'reconfigure'):
                    sys.stderr.reconfigure(encoding='utf-8', errors='replace')
            except Exception:
                pass

    # If mode is specified, convert it to a sabha
    if mode and not sabha:
        mode_to_sabha = {
            "fast": "laghu",
            "balanced": "madhyam",
            "thorough": "maha"
        }
        sabha = mode_to_sabha.get(mode, "laghu")

    working_dir = Path(cwd).resolve() if cwd else Path.cwd()

    # Pass sabha directly to the constructor (app.config is a ParishadConfig dataclass, not a dict)
    app = ParishadApp(model=model, sabha=sabha, cwd=str(working_dir))

    # Run with the inline driver for better Windows compatibility
    try:
        app.run()
    except KeyboardInterrupt:
        # Clean exit on Ctrl+C
        pass
    except Exception as e:
        # Show the error and exit cleanly
        print(f"\nError: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    run_code_cli()
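
# Programmatic usage (a sketch mirroring the defaults above):
#
#   from parishad.cli.code import run_code_cli
#   run_code_cli(backend="ollama", model="llama3.2:3b", mode="fast", cwd=".")
#
# mode="fast" resolves to sabha="laghu" before the app is constructed.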