parishad-0.1.7.tar.gz → parishad-0.1.8.tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- {parishad-0.1.7 → parishad-0.1.8}/.gitignore +3 -0
- {parishad-0.1.7 → parishad-0.1.8}/CHANGELOG.md +7 -0
- {parishad-0.1.7 → parishad-0.1.8}/PKG-INFO +1 -1
- parishad-0.1.8/docs/TUI_FREEZE_WINDOWS.md +143 -0
- {parishad-0.1.7 → parishad-0.1.8}/pyproject.toml +1 -1
- parishad-0.1.8/src/main.py +1 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/cli/code.py +604 -4
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/orchestrator/engine.py +68 -16
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/sainik.py +6 -2
- {parishad-0.1.7 → parishad-0.1.8}/.github/workflows/publish.yml +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/CODE_OF_CONDUCT.md +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/CONTRIBUTING.md +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/LICENSE +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/README.md +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/SECURITY.md +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/docs/assets/logo.jpeg +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/docs/assets/logo.svg +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/__main__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/checker/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/checker/deterministic.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/checker/ensemble.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/checker/retrieval.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/cli/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/cli/main.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/cli/prarambh.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/cli/sthapana.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/config/modes.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/config/pipeline.core.yaml +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/config/pipeline.extended.yaml +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/config/pipeline.fast.yaml +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/config/user_config.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/data/catalog.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/data/models.json +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/memory/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/backends/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/backends/base.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/backends/huggingface.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/backends/llama_cpp.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/backends/mlx_lm.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/backends/ollama.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/backends/openai_api.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/backends/transformers_hf.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/costs.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/downloader.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/optimizations.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/profiles.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/reliability.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/runner.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/models/tokenization.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/orchestrator/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/orchestrator/config_loader.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/orchestrator/exceptions.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/base.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/dandadhyaksha.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/darbari.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/majumdar.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/pantapradhan.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/prerak.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/raja.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/sacheev.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/sar_senapati.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/vidushak.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/tools/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/tools/base.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/tools/fs.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/tools/perception.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/tools/retrieval.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/tools/shell.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/utils/__init__.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/utils/hardware.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/utils/installer.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/utils/logging.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/utils/scanner.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/utils/text.py +0 -0
- {parishad-0.1.7 → parishad-0.1.8}/src/parishad/utils/tracing.py +0 -0
{parishad-0.1.7 → parishad-0.1.8}/CHANGELOG.md

@@ -5,6 +5,13 @@ All notable changes to the **Parishad** project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.1.8] - 2026-02-04
+
+### Fixed
+- **TUI Freeze on Windows**: Resolved GIL blocking issue that caused the TUI to freeze during llama-cpp inference by moving model execution to a separate subprocess.
+- **File Spam Prevention**: Added smart filtering to prevent unwanted file creation during general queries and math problems. Files are now only created when explicitly requested.
+- **Output Persistence**: Implemented automatic `output.json` updates with final answers for every query execution.
+
 ## [0.1.7] - 2026-01-26
 
 ### Fixed
{parishad-0.1.7 → parishad-0.1.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: parishad
-Version: 0.1.7
+Version: 0.1.8
 Summary: A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness
 Project-URL: Homepage, https://github.com/parishad-council/parishad
 Project-URL: Documentation, https://github.com/parishad-council/parishad#readme
parishad-0.1.8/docs/TUI_FREEZE_WINDOWS.md

@@ -0,0 +1,143 @@
+# TUI Freeze Issue on Windows - Technical Documentation
+
+> **Status: RESOLVED** ✅
+> **Solution: Subprocess-based inference isolation**
+
+## Problem Summary
+
+The Parishad TUI froze on Windows during LLM inference when using `llama-cpp-python`. The UI would become completely unresponsive even though model inference completed successfully in the background.
+
+### Symptoms
+- TUI freezes immediately after submitting a query
+- Model inference completes (GPU usage spikes then drops)
+- Output files are generated correctly
+- TUI remains frozen until force-quit
+- **Works fine on macOS, only affects Windows**
+
+---
+
+## Root Cause
+
+### Python's Global Interpreter Lock (GIL)
+
+The `llama-cpp-python` library holds Python's GIL during C-level inference operations. This blocks **all Python threads**, including Textual's event loop.
+
+Even when inference runs in a separate thread:
+1. Main thread (Textual TUI) waits for timer callbacks
+2. Worker thread (llama-cpp) acquires GIL for C extension
+3. C extension doesn't release GIL during computation
+4. Main thread cannot execute any Python code
+5. **TUI freezes completely**
+
+### Proof
+
+Debug logging showed a 31-second gap where the TUI's timer stopped firing entirely:
+
+```
+[16:04:33.134] POLL: Timer fired...   ← LAST TIMER CALLBACK
+
+⚠️ 31 SECONDS - NO TIMER CALLBACKS ⚠️
+
+[16:05:04.329] === INFERENCE COMPLETE ===
+```
+
+This proves the GIL was held continuously during inference.
+
+---
+
+## Solution: Process Isolation
+
+Since threads share the GIL, the only solution is **true process isolation** using `subprocess.Popen`.
+
+### Architecture
+
+```
+┌─────────────────────────┐    FILE IPC     ┌─────────────────────────┐
+│      MAIN PROCESS       │ ←─────────────→ │       SUBPROCESS        │
+│     (TUI - Textual)     │                 │       (Inference)       │
+│                         │                 │                         │
+│ • Timers work ✓         │   query.txt →   │ • Loads Parishad        │
+│ • UI responsive ✓       │  ← result.json  │ • Runs council.run()    │
+│ • Polls every 500ms     │                 │ • Saves JSON result     │
+│                         │                 │                         │
+│     OWN GIL (free)      │                 │     OWN GIL (busy)      │
+└─────────────────────────┘                 └─────────────────────────┘
+```
+
+### Implementation
+
+1. Save query to `~/.parishad/temp_query.txt`
+2. Generate Python script that imports Parishad and runs inference
+3. Launch subprocess with `subprocess.Popen()` (hidden window on Windows)
+4. Poll for `temp_result.json` every 500ms
+5. Display result when file appears
+
+### Key Code
+
+```python
+# Launch subprocess - SEPARATE PROCESS = SEPARATE GIL
+self._subprocess = subprocess.Popen(
+    [python_exe, str(script_file)],
+    stdout=subprocess.PIPE,
+    stderr=subprocess.PIPE,
+    startupinfo=startupinfo,  # Hidden window on Windows
+    cwd=str(self.cwd),
+)
+
+# Poll for result (TUI stays responsive!)
+def poll_subprocess_result():
+    if result_file.exists():
+        result = json.loads(result_file.read_text())
+        display_result(result)
+    else:
+        self.set_timer(0.5, poll_subprocess_result)
+```
+
+---
+
+## Tradeoffs
+
+| Aspect | Before (Threading) | After (Subprocess) |
+|--------|-------------------|-------------------|
+| TUI responsiveness | **FROZEN** | **RESPONSIVE** |
+| GIL blocking | Yes | No |
+| Model reload per query | No | Yes (~10-15s) |
+| Memory usage | Shared | Doubled |
+| IPC complexity | Low | Medium (file-based) |
+
+The model reload overhead is acceptable because the TUI now remains usable during inference.
+
+---
+
+## Files Modified
+
+- `src/parishad/cli/code.py` - Replaced threading with subprocess-based inference
+
+---
+
+## Testing
+
+1. Run `parishad code`
+2. Submit any query
+3. Verify TUI remains responsive (can scroll, etc.)
+4. Result appears after inference completes
+
+---
+
+## Related Issues
+
+This is a known pattern in LLM TUI applications:
+
+- `anomalyco/opencode` - Issues #9269, #8229
+- `ggerganov/llama.cpp` - Issues #3135, #1793
+- `Textualize/textual` - Issues #2167, #4552
+
+All report similar symptoms: TUI freezes during LLM inference on Windows due to GIL contention.
+
+---
+
+## Future Improvements
+
+1. **Persistent subprocess** - Keep model loaded between queries
+2. **Named pipes** - Faster IPC than file polling
+3. **Model caching** - Reduce reload time
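The query-file → subprocess → result-file round trip the document describes can be reproduced standalone. A minimal sketch, with an echo script standing in for the real Parishad inference and temporary paths instead of the package's `~/.parishad` files:

```python
# Minimal sketch of the file-IPC pattern: the parent writes a query file,
# spawns a separate Python process (its own GIL), and polls for the result.
# The worker body is a stand-in for the generated inference script.
import json
import subprocess
import sys
import tempfile
import time
from pathlib import Path

workdir = Path(tempfile.mkdtemp())
query_file = workdir / "query.txt"
result_file = workdir / "result.json"

query_file.write_text("What is 2 + 2?", encoding="utf-8")

# Stand-in worker: reads the query, writes a JSON result file.
worker = f"""
import json
from pathlib import Path
query = Path(r"{query_file}").read_text(encoding="utf-8")
result = {{"success": True, "final_answer": f"echo: {{query}}"}}
Path(r"{result_file}").write_text(json.dumps(result), encoding="utf-8")
"""

proc = subprocess.Popen([sys.executable, "-c", worker])

# In the real TUI this check runs on a 500 ms Textual timer; a plain loop here.
while not result_file.exists():
    time.sleep(0.5)

print(json.loads(result_file.read_text(encoding="utf-8")))
proc.wait()
```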
{parishad-0.1.7 → parishad-0.1.8}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "parishad"
-version = "0.1.7"
+version = "0.1.8"
 description = "A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness"
 readme = "README.md"
 license = "MIT"
parishad-0.1.8/src/main.py

@@ -0,0 +1 @@
+def add_numbers(a, b):
{parishad-0.1.7 → parishad-0.1.8}/src/parishad/cli/code.py

@@ -16,6 +16,8 @@ import sys
 import subprocess
 import socket
 import shutil
+import threading
+import queue
 from pathlib import Path
 from typing import Optional, Dict, List, Tuple
 
@@ -2187,6 +2189,40 @@ class ParishadApp(App):
         """Message to open setup screen from worker thread."""
         pass
 
+    # Custom messages for non-blocking thread worker communication
+    class LogMessage(Message):
+        """Non-blocking log message from worker thread."""
+        def __init__(self, text: str) -> None:
+            self.text = text
+            super().__init__()
+
+    class SabhaResultReady(Message):
+        """Non-blocking message when Sabha result is ready."""
+        def __init__(self, trace) -> None:
+            self.trace = trace
+            super().__init__()
+
+    class SabhaError(Message):
+        """Non-blocking message when Sabha encounters an error."""
+        def __init__(self, error: Exception, traceback_str: str) -> None:
+            self.error = error
+            self.traceback_str = traceback_str
+            super().__init__()
+
+    class CouncilReady(Message):
+        """Non-blocking message when council initialization completes."""
+        def __init__(self, success: bool, profile: str = "", error_msg: str = "") -> None:
+            self.success = success
+            self.profile = profile
+            self.error_msg = error_msg
+            super().__init__()
+
+    class WorkerComplete(Message):
+        """Non-blocking message when any worker completes."""
+        def __init__(self, worker_type: str) -> None:
+            self.worker_type = worker_type
+            super().__init__()
+
     CSS = CSS
     SCREENS = {"setup": SetupScreen}
     BINDINGS = [
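These nested `Message` subclasses follow Textual's standard pattern: a worker thread calls `post_message`, and Textual dispatches to a handler named after the defining class on the main event loop. A minimal sketch of the same pattern, assuming the `textual` package (the app, widget, and message names here are illustrative):

```python
# Worker-to-UI messaging sketch: the worker never touches widgets directly;
# it posts a Message, and the handler runs safely on the main thread.
import threading
from textual.app import App, ComposeResult
from textual.message import Message
from textual.widgets import Log

class DemoApp(App):
    class LogMessage(Message):
        def __init__(self, text: str) -> None:
            self.text = text
            super().__init__()

    def compose(self) -> ComposeResult:
        yield Log()

    def on_mount(self) -> None:
        threading.Thread(target=self._worker, daemon=True).start()

    def _worker(self) -> None:
        # Thread-safe hand-off to the event loop, as in the diff above.
        self.post_message(self.LogMessage("work finished"))

    # Handler name derives from the enclosing class: DemoApp.LogMessage
    def on_demo_app_log_message(self, message: LogMessage) -> None:
        self.query_one(Log).write_line(message.text)

if __name__ == "__main__":
    DemoApp().run()
```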
@@ -2204,6 +2240,12 @@ class ParishadApp(App):
         self._initializing = False  # Prevent concurrent initialization
         self._processing_query = False  # Prevent concurrent query processing
 
+        # CRITICAL FOR WINDOWS: Thread-safe result queue for native threading
+        # This bypasses Textual's worker system which causes freezes on Windows
+        self._result_queue = queue.Queue()
+        self._worker_thread = None
+        self._subprocess = None  # For subprocess-based inference
+
         # Load config from disk
         self.config = load_parishad_config()
 
@@ -2306,8 +2348,9 @@ class ParishadApp(App):
         if self._initializing:
             return
 
-        # Run model loading
-
+        # Run model loading in a thread worker to avoid freezing UI on Windows
+        # CRITICAL: Using thread=True ensures blocking model loading doesn't freeze the TUI
+        self.run_worker(self._initialize_council_thread_worker, thread=True, exclusive=True)
 
     async def _async_initialize_council(self) -> None:
         """Async worker to initialize Sabha council without blocking UI."""
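For context on the call above: Textual's `run_worker(..., thread=True)` executes the callable in a worker thread rather than scheduling it as a coroutine on the event loop, and `exclusive=True` cancels any previously running worker in the same group, which presumably guards against overlapping initializations here.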
@@ -2442,6 +2485,117 @@ class ParishadApp(App):
         finally:
             self._initializing = False
 
+    def _initialize_council_thread_worker(self) -> None:
+        """
+        Initialize Sabha council in a dedicated thread worker.
+
+        CRITICAL FOR WINDOWS: This method runs in a real OS thread (not an asyncio executor)
+        which prevents the TUI from freezing during blocking model loading.
+
+        UI updates are sent via non-blocking post_message to prevent deadlock.
+        """
+        if self._initializing:
+            self.post_message(self.LogMessage("[yellow]Already initializing...[/yellow]\n"))
+            return
+
+        self._initializing = True
+
+        try:
+            from parishad.orchestrator.engine import Parishad
+            from parishad.config.user_config import load_user_config
+
+            self.post_message(self.LogMessage("[cyan]🔄 Initializing Sabha council...[/cyan]\n"))
+
+            # Load user config for profile (same as CLI run does)
+            user_cfg = load_user_config()
+            profile = user_cfg.default_profile
+            mode = user_cfg.default_mode
+
+            self.post_message(self.LogMessage(f"[dim]  • Profile: {profile}[/dim]\n"))
+            self.post_message(self.LogMessage(f"[dim]  • Mode: {mode}[/dim]\n"))
+
+            # Get pipeline config from Sabha selection
+            if self.config:
+                config_name = self.config.get_pipeline_config()
+                self.post_message(self.LogMessage(f"[dim]  • Pipeline: {config_name}[/dim]\n"))
+            else:
+                config_name = "core"  # Default fallback
+                self.post_message(self.LogMessage(f"[dim]  • Pipeline: {config_name} (default)[/dim]\n"))
+
+            self.post_message(self.LogMessage(f"[yellow]⏳ Loading models (this may take 30-60 seconds)...[/yellow]\n"))
+            self.post_message(self.LogMessage(f"[dim]  • Creating Parishad engine...[/dim]\n"))
+
+            # Build user_forced_config from model_map
+            user_forced_config = {}
+            if self.config and self.config.model_map:
+                # Initialize manager to resolve paths
+                from parishad.models.downloader import ModelManager
+                model_manager = ModelManager()
+
+                msg_backend = self.config.backend or "ollama"
+
+                for slot, model_id in self.config.model_map.items():
+                    # Default to current config backend
+                    current_backend = msg_backend
+                    model_file = None
+
+                    # Check if it's a known model to resolve backend/path
+                    model_info = model_manager.registry.get(model_id)
+                    if model_info:
+                        # Handle Enum comparison correctly
+                        source = model_info.source.value if hasattr(model_info.source, "value") else str(model_info.source)
+
+                        if source == "local":
+                            current_backend = "llama_cpp"
+                            model_file = str(model_info.path)
+                        elif source == "ollama":
+                            current_backend = "ollama"
+                        elif source == "mlx":
+                            current_backend = "mlx"
+                    else:
+                        # Fallback heuristics if not in registry
+                        if model_id.startswith("local:"):
+                            current_backend = "llama_cpp"
+                        elif model_id.startswith("ollama:") or ":" in model_id:
+                            current_backend = "ollama"
+
+                    user_forced_config[slot] = {
+                        "model_id": model_id,
+                        "backend_type": current_backend
+                    }
+                    if model_file:
+                        user_forced_config[slot]["model_file"] = model_file
+
+            # Create the Parishad engine (blocking call in this thread)
+            self.council = Parishad(
+                config=config_name,
+                model_config_path=None,  # Let engine use profiles + models.yaml
+                profile=profile,
+                pipeline_config_path=None,
+                trace_dir=None,
+                mock=False,
+                stub=False,
+                mode=mode,
+                user_forced_config=user_forced_config or None,
+                no_retry=False,
+            )
+
+            if self.council:
+                self.post_message(self.CouncilReady(success=True, profile=profile))
+            else:
+                self.post_message(self.CouncilReady(success=False, error_msg="Council initialization returned None"))
+
+        except Exception as e:
+            import traceback
+            tb = traceback.format_exc()
+            self.post_message(self.CouncilReady(
+                success=False,
+                error_msg=f"{type(e).__name__}: {e}\n{tb}"
+            ))
+            self.council = None
+        finally:
+            self._initializing = False
+
     # DEPRECATED: TUI now uses same engine setup as CLI 'parishad run'
     # This method is no longer called
     # def _create_model_config_from_tui(self):
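The registry-miss fallback in this hunk is small enough to restate as a pure function, which makes it easy to spot-check. An illustrative sketch, not part of the package's API (the `guess_backend` name is invented here):

```python
def guess_backend(model_id: str, default: str = "ollama") -> str:
    # Mirrors the fallback heuristics above: "local:" ids map to llama_cpp,
    # "ollama:" ids (or anything with a colon, e.g. "llama3:8b") map to ollama,
    # and everything else keeps the configured default backend.
    if model_id.startswith("local:"):
        return "llama_cpp"
    if model_id.startswith("ollama:") or ":" in model_id:
        return "ollama"
    return default

assert guess_backend("local:phi-3-mini.gguf") == "llama_cpp"
assert guess_backend("llama3:8b") == "ollama"
assert guess_backend("mistral", default="llama_cpp") == "llama_cpp"
```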
@@ -2626,8 +2780,145 @@ class ParishadApp(App):
             self.log_message("[yellow]⚠ Already processing a query, please wait...[/yellow]")
             return
 
-        #
-
+        # CRITICAL FOR WINDOWS: The GIL blocks Textual's event loop during llama-cpp inference
+        # even when using threads. We use subprocess.Popen to spawn a SEPARATE Python process.
+        # This is the only way to keep the TUI responsive on Windows with llama-cpp-python.
+        # See: docs/TUI_FREEZE_WINDOWS.md for full technical explanation.
+        self._processing_query = True
+
+        # Save query to temp file for subprocess to read
+        query_file = Path.home() / ".parishad" / "temp_query.txt"
+        result_file = Path.home() / ".parishad" / "temp_result.json"
+        status_file = Path.home() / ".parishad" / "temp_status.txt"
+
+        # Clean up old files
+        for f in [result_file, status_file]:
+            if f.exists():
+                f.unlink()
+
+        query_file.write_text(final_prompt, encoding="utf-8")
+
+        # Get the Python executable path
+        python_exe = sys.executable
+
+        # Build inline script that runs inference and saves result
+        inline_script = f'''
+import sys
+import json
+from pathlib import Path
+
+query_file = Path(r"{query_file}")
+result_file = Path(r"{result_file}")
+status_file = Path(r"{status_file}")
+
+try:
+    status_file.write_text("starting", encoding="utf-8")
+
+    query = query_file.read_text(encoding="utf-8")
+
+    status_file.write_text("loading", encoding="utf-8")
+
+    # Import and run inference
+    from parishad.orchestrator.engine import Parishad
+    from parishad.config.user_config import load_user_config
+
+    user_cfg = load_user_config()
+
+    council = Parishad(
+        config="core",
+        profile=user_cfg.default_profile,
+        mode=user_cfg.default_mode,
+    )
+
+    status_file.write_text("running", encoding="utf-8")
+
+    trace = council.run(query)
+
+    status_file.write_text("complete", encoding="utf-8")
+
+    # Save result as JSON
+    result = {{
+        "success": True,
+        "roles": len(trace.roles),
+        "tokens": trace.total_tokens,
+        "final_answer": trace.final_answer.final_answer if trace.final_answer else None,
+        "error": trace.error,
+    }}
+    result_file.write_text(json.dumps(result, indent=2), encoding="utf-8")
+
+except Exception as e:
+    import traceback
+    result = {{
+        "success": False,
+        "error": str(e),
+        "traceback": traceback.format_exc()
+    }}
+    result_file.write_text(json.dumps(result, indent=2), encoding="utf-8")
+    status_file.write_text("error", encoding="utf-8")
+'''
+
+        # Write script to temp file
+        script_file = Path.home() / ".parishad" / "temp_inference_script.py"
+        script_file.write_text(inline_script, encoding="utf-8")
+
+        # Launch subprocess - runs in a COMPLETELY SEPARATE PROCESS (no GIL sharing!)
+        # Use CREATE_NO_WINDOW on Windows to avoid console popup
+        startupinfo = None
+        if sys.platform == "win32":
+            startupinfo = subprocess.STARTUPINFO()
+            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+            startupinfo.wShowWindow = 0  # SW_HIDE
+
+        self._subprocess = subprocess.Popen(
+            [python_exe, str(script_file)],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            startupinfo=startupinfo,
+            cwd=str(self.cwd),
+        )
+
+        # Poll for result file
+        def poll_subprocess_result():
+            # Check if result file exists (inference complete)
+            if result_file.exists():
+                try:
+                    result = json.loads(result_file.read_text(encoding="utf-8"))
+
+                    if result.get("success"):
+                        # Display the result
+                        self.log_message(f"\n[dim]━━━ Sabha Activity ({result.get('roles')} roles, {result.get('tokens')} tokens) ━━━[/dim]")
+
+                        if result.get("final_answer"):
+                            self.log_message(f"\n[bold]👑 Raja's Answer:[/bold]\n{result['final_answer']}\n")
+                        elif result.get("error"):
+                            self.log_message(f"\n[red]Error: {result['error']}[/red]")
+                        else:
+                            self.log_message("\n[green]Query completed successfully![/green]")
+                    else:
+                        self.log_message(f"\n[red]Error: {result.get('error')}[/red]\n[dim]{result.get('traceback', '')[:500]}...[/dim]")
+
+                    # Cleanup temp files
+                    for f in [result_file, status_file, script_file]:
+                        try:
+                            f.unlink()
+                        except:
+                            pass
+
+                    self._processing_query = False
+                    try:
+                        self.query_one("#prompt-input", Input).focus()
+                    except:
+                        pass
+
+                except Exception:
+                    self._processing_query = False
+            else:
+                # Keep polling until result is ready
+                if self._processing_query:
+                    self.set_timer(0.5, poll_subprocess_result)
+
+        # Start polling for result
+        self.set_timer(0.5, poll_subprocess_result)
 
     async def _async_run_sabha(self, query: str, progress: RoleProgressBar) -> None:
         """Execute Sabha council asynchronously to prevent UI freezing."""
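Note that the generated script also maintains a status file that steps through `starting → loading → running → complete` (or `error`). The poller above only checks for the result file; a reader for the status channel could look like this sketch (the path argument and function name are illustrative, not part of the package):

```python
from pathlib import Path

def read_status(status_file: Path) -> str:
    # Returns the subprocess's last reported phase; "pending" means the
    # worker has not written anything yet.
    try:
        return status_file.read_text(encoding="utf-8").strip()
    except FileNotFoundError:
        return "pending"

# e.g. read_status(Path.home() / ".parishad" / "temp_status.txt")
```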
@@ -2740,6 +3031,315 @@ class ParishadApp(App):
             except:
                 pass
 
+    def _native_sabha_worker(self, query: str) -> None:
+        """
+        Native Python thread worker for Sabha execution.
+
+        CRITICAL FOR WINDOWS: This uses a regular Python thread and a thread-safe
+        queue instead of Textual's worker system which causes freezes on Windows.
+        """
+        debug_log(">>> WORKER THREAD STARTED <<<")
+        debug_log(f"Worker thread ID: {threading.current_thread().ident}")
+        debug_log(f"Query to process: {query[:100]}...")
+
+        try:
+            # Run the blocking inference in this native thread
+            debug_log("Calling self.council.run()... (this will block)")
+            debug_log("=== INFERENCE START ===")
+
+            trace = self.council.run(query)
+
+            debug_log("=== INFERENCE COMPLETE ===")
+            debug_log(f"Trace received: {trace is not None}")
+            if trace:
+                debug_log(f"Trace roles: {len(trace.roles)}, tokens: {trace.total_tokens}")
+
+            # Put result in queue (thread-safe, non-blocking)
+            debug_log("Putting result in queue...")
+            self._result_queue.put(("success", trace))
+            debug_log("Result queued successfully!")
+
+        except Exception as e:
+            import traceback
+            tb = traceback.format_exc()
+            debug_log(f"!!! WORKER ERROR: {type(e).__name__}: {e}")
+            debug_log(f"Traceback: {tb[:500]}")
+            # Put error in queue
+            self._result_queue.put(("error", (e, tb)))
+            debug_log("Error queued.")
+
+        debug_log(">>> WORKER THREAD EXITING <<<")
+
+    def _poll_result_queue(self) -> None:
+        """
+        Timer callback to poll the result queue for Sabha results.
+
+        This is called by a Textual timer and runs on the main event loop thread,
+        so it's safe to update the UI directly.
+        """
+        debug_log("POLL: Timer fired, checking queue...")
+
+        try:
+            # Non-blocking check for results
+            result_type, result_data = self._result_queue.get_nowait()
+
+            debug_log(f"POLL: Got result from queue! Type: {result_type}")
+
+            # Process the result on the main thread (safe for UI updates)
+            if result_type == "success":
+                debug_log("POLL: Processing success result, calling _display_sabha_result_direct...")
+                self._display_sabha_result_direct(result_data)
+                debug_log("POLL: Display complete!")
+            else:
+                debug_log("POLL: Processing error result...")
+                error, tb = result_data
+                self.log_message(f"\n[red]Error ({type(error).__name__}): {error}[/red]\n[dim]{tb[:500]}...[/dim]")
+
+            # Clean up
+            debug_log("POLL: Cleaning up, setting _processing_query = False")
+            self._processing_query = False
+            try:
+                self.query_one("#prompt-input", Input).focus()
+                debug_log("POLL: Input refocused!")
+            except Exception:
+                debug_log("POLL: Could not refocus input")
+
+            debug_log("=== QUERY EXECUTION COMPLETE ===")
+
+        except queue.Empty:
+            # No result yet, keep polling
+            if self._processing_query:
+                # Don't log every tick to avoid log spam, just every 10th
+                self.set_timer(0.1, self._poll_result_queue)
+
+    def _display_sabha_result_direct(self, trace) -> None:
+        """Display Sabha result directly (called from main thread via poll timer)."""
+        # Update progress bar based on trace
+        try:
+            progress = self.query_one("#role-progress", RoleProgressBar)
+            for role_output in trace.roles:
+                role_name = role_output.role.lower()
+                progress.mark_complete(role_name)
+        except Exception:
+            pass  # Progress bar update is non-critical
+
+        # Display role activity summary (collapsible style)
+        self.log_message(f"\n[dim]━━━ Sabha Activity ({len(trace.roles)} roles, {trace.total_tokens} tokens) ━━━[/dim]")
+
+        for role_output in trace.roles:
+            role_name = role_output.role.lower()
+            info = ROLE_INFO.get(role_name, {"emoji": "❓", "name": role_name.title()})
+            status_icon = "[green]✓[/green]" if role_output.status == "success" else "[red]✗[/red]"
+
+            # Brief summary of what the role did
+            summary = ""
+            if role_name == "darbari" and role_output.core_output:
+                task_type = role_output.core_output.get("task_type", "unknown")
+                summary = f"→ Task: {task_type}"
+            elif role_name == "majumdar" and role_output.core_output:
+                steps = role_output.core_output.get("steps", [])
+                summary = f"→ {len(steps)} step plan"
+            elif role_name == "prerak" and role_output.core_output:
+                flags = role_output.core_output.get("flags", [])
+                if not flags:
+                    summary = "→ No issues"
+                else:
+                    summary = f"→ {len(flags)} issue(s)"
+            elif role_name == "raja" and role_output.core_output:
+                conf = role_output.core_output.get("confidence", 0)
+                summary = f"→ Confidence: {int(conf*100)}%"
+
+            # Show model used
+            model_str = ""
+            if role_output.metadata and role_output.metadata.model_id:
+                mid = role_output.metadata.model_id
+                if "/" in mid:
+                    mid = mid.split("/")[-1]
+                if mid.endswith(".gguf"):
+                    mid = mid[:-5]
+                model_str = f"[dim]({mid})[/dim]"
+
+            if role_output.status == "error":
+                err_msg = role_output.error or "Unknown error"
+                summary = f"[red]{err_msg}[/red]"
+
+            self.log_message(f"  {info['emoji']} {info['name']} {model_str}: {status_icon} {summary}")
+
+        self.log_message(f"[dim]━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[/dim]\n")
+
+        # Check for file generation
+        for role_output in trace.roles:
+            if role_output.core_output and role_output.core_output.get("target_file"):
+                fpath = role_output.core_output.get("target_file")
+                self.log_message(f"\n[bold blue]📁 File Generated:[/bold blue] {fpath}")
+
+        # Display the final answer from Raja
+        if trace.final_answer:
+            answer = trace.final_answer.final_answer
+            self.log_message(f"\n[bold]👑 Raja's Answer:[/bold]\n{answer}\n")
+        elif trace.error:
+            self.log_message(f"\n[red]Error: {trace.error}[/red]")
+        else:
+            file_gen = any(r.core_output and r.core_output.get("target_file") for r in trace.roles)
+            if not file_gen:
+                self.log_message("\n[yellow]No answer generated[/yellow]")
+
+    def _run_sabha_thread_worker(self, query: str) -> None:
+        """
+        Execute Sabha council in a dedicated thread worker.
+
+        CRITICAL FOR WINDOWS: This method runs in a real OS thread (not an asyncio executor)
+        which prevents the TUI from freezing during blocking llama-cpp inference.
+
+        UI updates are sent via non-blocking post_message to prevent deadlock.
+        """
+        if self._processing_query:
+            return
+
+        self._processing_query = True
+
+        try:
+            # Run the blocking inference in this thread
+            # This won't freeze the UI because it's a real thread worker
+            trace = self.council.run(query)
+
+            # Send non-blocking message with result (won't deadlock!)
+            self.post_message(self.SabhaResultReady(trace))
+
+        except Exception as e:
+            import traceback
+            tb = traceback.format_exc()
+            # Send non-blocking error message
+            self.post_message(self.SabhaError(e, tb))
+        finally:
+            self._processing_query = False
+            # Send non-blocking completion message
+            self.post_message(self.WorkerComplete("sabha"))
+
+    def on_parishad_app_log_message(self, message: LogMessage) -> None:
+        """Handle non-blocking log messages from worker threads."""
+        self.log_message(message.text)
+
+    def on_parishad_app_sabha_result_ready(self, message: SabhaResultReady) -> None:
+        """Handle Sabha result from worker thread (non-blocking)."""
+        trace = message.trace
+
+        # Update progress bar based on trace
+        try:
+            progress = self.query_one("#role-progress", RoleProgressBar)
+            for role_output in trace.roles:
+                role_name = role_output.role.lower()
+                progress.mark_complete(role_name)
+        except Exception:
+            pass  # Progress bar update is non-critical
+
+        # Display role activity summary (collapsible style)
+        self.log_message(f"\n[dim]━━━ Sabha Activity ({len(trace.roles)} roles, {trace.total_tokens} tokens) ━━━[/dim]")
+
+        for role_output in trace.roles:
+            role_name = role_output.role.lower()
+            info = ROLE_INFO.get(role_name, {"emoji": "❓", "name": role_name.title()})
+            status_icon = "[green]✓[/green]" if role_output.status == "success" else "[red]✗[/red]"
+
+            # Brief summary of what the role did
+            summary = ""
+            if role_name == "darbari" and role_output.core_output:
+                task_type = role_output.core_output.get("task_type", "unknown")
+                summary = f"→ Task: {task_type}"
+            elif role_name == "majumdar" and role_output.core_output:
+                steps = role_output.core_output.get("steps", [])
+                summary = f"→ {len(steps)} step plan"
+            elif role_name == "prerak" and role_output.core_output:
+                flags = role_output.core_output.get("flags", [])
+                if not flags:
+                    summary = "→ No issues"
+                else:
+                    summary = f"→ {len(flags)} issue(s)"
+            elif role_name == "raja" and role_output.core_output:
+                conf = role_output.core_output.get("confidence", 0)
+                summary = f"→ Confidence: {int(conf*100)}%"
+
+            # Show model used
+            model_str = ""
+            if role_output.metadata and role_output.metadata.model_id:
+                mid = role_output.metadata.model_id
+                # Strip path
+                if "/" in mid:
+                    mid = mid.split("/")[-1]
+                # Strip extension (optional but cleaner)
+                if mid.endswith(".gguf"):
+                    mid = mid[:-5]
+                model_str = f"[dim]({mid})[/dim]"
+
+            if role_output.status == "error":
+                err_msg = role_output.error or "Unknown error"
+                # Show full error
+                summary = f"[red]{err_msg}[/red]"
+
+            self.log_message(f"  {info['emoji']} {info['name']} {model_str}: {status_icon} {summary}")
+
+        self.log_message(f"[dim]━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[/dim]\n")
+
+        # Check for silent file generation (common source of confusion)
+        for role_output in trace.roles:
+            if role_output.core_output and role_output.core_output.get("target_file"):
+                fpath = role_output.core_output.get("target_file")
+                self.log_message(f"\n[bold blue]📁 File Generated:[/bold blue] {fpath}")
+
+        # Display the final answer from Raja
+        if trace.final_answer:
+            answer = trace.final_answer.final_answer
+            self.log_message(f"\n[bold]👑 Raja's Answer:[/bold]\n{answer}\n")
+        elif trace.error:
+            self.log_message(f"\n[red]Error: {trace.error}[/red]")
+        else:
+            # Check if we generated a file but no text answer
+            file_gen = any(r.core_output and r.core_output.get("target_file") for r in trace.roles)
+            if not file_gen:
+                self.log_message("\n[yellow]No answer generated[/yellow]")
+
+    def on_parishad_app_sabha_error(self, message: SabhaError) -> None:
+        """Handle Sabha error from worker thread (non-blocking)."""
+        self.log_message(f"\n[red]Error ({type(message.error).__name__}): {message.error}[/red]\n[dim]{message.traceback_str[:500]}...[/dim]")
+
+    def on_parishad_app_worker_complete(self, message: WorkerComplete) -> None:
+        """Handle worker completion - refocus input."""
+        try:
+            self.query_one("#prompt-input", Input).focus()
+        except Exception:
+            pass
+
+    def on_parishad_app_council_ready(self, message: CouncilReady) -> None:
+        """Handle council initialization completion (non-blocking)."""
+        if message.success:
+            self.log_message(
+                f"[green]✅ Sabha council ready![/green]\n"
+                f"[dim]Models loaded from profile '{message.profile}'[/dim]\n"
+                f"[dim]You can now start asking questions.[/dim]\n"
+            )
+        else:
+            self.log_message(
+                f"[red]✗ Error loading Sabha council:[/red]\n"
+                f"[dim]{message.error_msg}[/dim]\n"
+            )
+
+    # Keep these for backward compatibility but they are no longer used for thread workers
+    def _display_sabha_result(self, trace) -> None:
+        """Display Sabha result on main thread (DEPRECATED - use message handlers now)."""
+        # Delegate to message handler
+        self.on_parishad_app_sabha_result_ready(self.SabhaResultReady(trace))
+
+    def _display_sabha_error(self, error: Exception, tb: str) -> None:
+        """Display Sabha error on main thread (DEPRECATED - use message handlers now)."""
+        self.on_parishad_app_sabha_error(self.SabhaError(error, tb))
+
+    def _refocus_input(self) -> None:
+        """Refocus input widget after query completion (DEPRECATED - use message handlers now)."""
+        try:
+            self.query_one("#prompt-input", Input).focus()
+        except Exception:
+            pass
+
     def handle_command(self, parsed: ParsedInput) -> None:
         """Handle slash commands with ParsedInput."""
         cmd = parsed.command_name
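Reduced to its essentials, the pattern in this hunk is: a plain `threading.Thread` puts a tagged result on a `queue.Queue`, and the main thread drains it with `get_nowait()` so it never blocks. A minimal standalone sketch (the Textual timer is replaced by a sleep loop here):

```python
import queue
import threading
import time

results: queue.Queue = queue.Queue()

def worker() -> None:
    try:
        answer = 21 * 2  # stand-in for the blocking council.run(query)
        results.put(("success", answer))
    except Exception as e:
        results.put(("error", e))

threading.Thread(target=worker, daemon=True).start()

while True:
    try:
        kind, payload = results.get_nowait()  # never blocks the "UI" thread
        print(kind, payload)
        break
    except queue.Empty:
        time.sleep(0.1)  # in the TUI this is set_timer(0.1, ...)
```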
{parishad-0.1.7 → parishad-0.1.8}/src/parishad/orchestrator/engine.py

@@ -656,23 +656,47 @@ class ParishadEngine:
                 content = output.core_output.get("content")
 
                 if target_file and content:
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    # Smart filter: Only write files if user explicitly requested it
+                    # Check if the query contains file-related keywords
+                    query_lower = ctx.user_query.lower()
+                    file_keywords = [
+                        'create', 'write', 'save', 'generate', 'make', 'update', 'modify',
+                        'file', 'script', '.py', '.txt', '.md', '.json', '.yaml', '.yml',
+                        'to file', 'in file', 'save to', 'write to'
+                    ]
+
+                    # Check if any file keyword is in the query
+                    should_write_file = any(keyword in query_lower for keyword in file_keywords)
+
+                    # Also check for specific file path mentions (e.g., "src/main.py")
+                    import re
+                    file_path_pattern = r'\b[\w/\\]+\.\w+\b'
+                    if re.search(file_path_pattern, ctx.user_query):
+                        should_write_file = True
+
+                    if should_write_file:
+                        try:
+                            # Use FS tool to write
+                            logger.info(f"Writing file {target_file} via Sainik")
 
-
-
-
+                            # Simple content write
+                            result = self.fs_tool.run("write", path=target_file, content=content)
+
+                            if not result.success:
+                                logger.error(f"Failed to write file {target_file}: {result.error}")
+                                output.error = f"File write failed: {result.error}"
+                                # Optionally mark partial success?
+                            else:
+                                logger.info(f"Successfully wrote {target_file}")
+
+                        except Exception as e:
+                            logger.error(f"Error handling file write for {target_file}: {e}")
+                            output.error = f"File write exception: {str(e)}"
+                    else:
+                        logger.info(f"Skipping file write for {target_file} - no file creation keyword detected in query")
+                        # Clear the target_file from output to prevent confusion
+                        if "target_file" in output.core_output:
+                            output.core_output["target_file"] = None
 
         # Phase 13: General Tool Execution (Agentic)
         if role_name == "sainik" and output.status == "success":
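The filter in this hunk can be exercised in isolation; a sketch for spot-checking which queries would trigger a write (the function and constant names are invented here). One caveat worth noting: `\b[\w/\\]+\.\w+\b` matches any dotted token, including decimals such as `3.14`, so a math query containing one still passes the filter.

```python
import re

FILE_KEYWORDS = [
    'create', 'write', 'save', 'generate', 'make', 'update', 'modify',
    'file', 'script', '.py', '.txt', '.md', '.json', '.yaml', '.yml',
    'to file', 'in file', 'save to', 'write to',
]
FILE_PATH_PATTERN = re.compile(r'\b[\w/\\]+\.\w+\b')

def should_write_file(query: str) -> bool:
    # A file write happens only if the query names a file-ish action or a path.
    q = query.lower()
    return any(k in q for k in FILE_KEYWORDS) or bool(FILE_PATH_PATTERN.search(query))

assert should_write_file("save this to config.json")
assert should_write_file("write code in src/main.py")
assert not should_write_file("what is 2+2")
assert should_write_file("what is 3.14 * 2")  # decimal trips the path regex
```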
@@ -884,6 +908,9 @@ class ParishadEngine:
         if self.trace_dir:
             self._save_trace(trace)
 
+        # Save final answer to output.json in workspace root
+        self._save_output_json(trace)
+
         logger.info(
             f"Parishad run complete: {ctx.query_id} "
             f"(tokens: {ctx.tokens_used}/{budget}, success: {success})"
@@ -965,6 +992,31 @@ class ParishadEngine:
             f.write(trace.to_json())
 
         logger.debug(f"Trace saved: {filepath}")
+
+    def _save_output_json(self, trace: Trace) -> None:
+        """Save final answer to output.json in workspace root."""
+        try:
+            import json
+            from pathlib import Path
+
+            # Get the workspace root (current working directory)
+            output_path = Path.cwd() / "output.json"
+
+            # Extract the final answer text
+            if trace.final_answer:
+                output_content = trace.final_answer.final_answer
+            else:
+                output_content = "No answer generated"
+
+            # Write the output to output.json
+            with open(output_path, "w", encoding="utf-8") as f:
+                json.dump(output_content, f, indent=2, ensure_ascii=False)
+
+            logger.debug(f"Output saved to: {output_path}")
+
+        except Exception as e:
+            logger.warning(f"Failed to save output.json: {e}")
+
 
 
 class Parishad:
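One detail of `_save_output_json` worth making explicit: it dumps the answer string itself, not a wrapping object, so `output.json` holds a bare quoted JSON string. A sketch:

```python
import json

# json.dump on a str produces a quoted JSON string, not an object:
with open("output.json", "w", encoding="utf-8") as f:
    json.dump("The answer is 42.", f, indent=2, ensure_ascii=False)

# output.json now contains exactly:
# "The answer is 42."
```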
{parishad-0.1.7 → parishad-0.1.8}/src/parishad/roles/sainik.py

@@ -52,8 +52,12 @@ You must ALWAYS respond with a valid JSON object in the following format:
 Guidelines:
 - If writing code, put the COMPLETE runnable code in "content".
 - If writing text, put the clear explanation in "content".
--
--
+- **CRITICAL: ONLY set "target_file" if the user EXPLICITLY asks to create/save/write a file with a specific filename.**
+- Examples where you SHOULD set target_file: "create a script called math_utils.py", "save this to config.json", "write code in src/main.py"
+- Examples where you should NOT set target_file: "what is 2+2", "write code to add numbers", "how do I calculate X", "solve this math problem", "explain Y"
+- For math problems, explanations, and general questions: Put the answer in "content" and set "target_file": null
+- NEVER write to docs/ directory unless explicitly instructed to do so by the user.
+- NEVER create files just because you're writing code - only if the user wants to SAVE it to a file.
 - "target_file" should be relative to the current directory (e.g., "src/main.py").
 - If "target_file" is a text/markdown/json file (not executable code), put the RAW content in "content". DO NOT write a Python script to create it.
 - If you need to Use a tool, add it to `tool_calls`. Available tools will be listed in the prompt.
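To make the `target_file` contract concrete, two hedged example responses (only the fields discussed in this hunk are shown; the full response schema is defined earlier in the prompt and not visible in this diff). For a general question:

```json
{"content": "2 + 2 = 4", "target_file": null}
```

versus, when the user explicitly asks for a file:

```json
{"content": "def add(a, b):\n    return a + b\n", "target_file": "src/math_utils.py"}
```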