parishad 0.1.6__tar.gz → 0.1.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {parishad-0.1.6 → parishad-0.1.8}/.gitignore +3 -0
- {parishad-0.1.6 → parishad-0.1.8}/CHANGELOG.md +13 -0
- {parishad-0.1.6 → parishad-0.1.8}/PKG-INFO +1 -1
- parishad-0.1.8/docs/TUI_FREEZE_WINDOWS.md +143 -0
- {parishad-0.1.6 → parishad-0.1.8}/pyproject.toml +1 -1
- parishad-0.1.8/src/main.py +1 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/__init__.py +1 -1
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/code.py +608 -5
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/pipeline.core.yaml +1 -1
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/pipeline.extended.yaml +1 -1
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/pipeline.fast.yaml +1 -1
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/data/models.json +1 -1
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/runner.py +3 -3
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/orchestrator/config_loader.py +2 -2
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/orchestrator/engine.py +68 -16
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/sainik.py +6 -2
- {parishad-0.1.6 → parishad-0.1.8}/.github/workflows/publish.yml +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/CODE_OF_CONDUCT.md +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/CONTRIBUTING.md +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/LICENSE +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/README.md +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/SECURITY.md +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/docs/assets/logo.jpeg +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/docs/assets/logo.svg +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/__main__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/checker/__init__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/checker/deterministic.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/checker/ensemble.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/checker/retrieval.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/__init__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/main.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/prarambh.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/sthapana.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/modes.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/user_config.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/data/catalog.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/memory/__init__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/__init__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/__init__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/base.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/huggingface.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/llama_cpp.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/mlx_lm.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/ollama.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/openai_api.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/transformers_hf.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/costs.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/downloader.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/optimizations.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/profiles.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/reliability.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/tokenization.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/orchestrator/__init__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/orchestrator/exceptions.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/__init__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/base.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/dandadhyaksha.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/darbari.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/majumdar.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/pantapradhan.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/prerak.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/raja.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/sacheev.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/sar_senapati.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/vidushak.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/__init__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/base.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/fs.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/perception.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/retrieval.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/shell.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/__init__.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/hardware.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/installer.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/logging.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/scanner.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/text.py +0 -0
- {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/tracing.py +0 -0
{parishad-0.1.6 → parishad-0.1.8}/CHANGELOG.md

@@ -5,6 +5,19 @@ All notable changes to the **Parishad** project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.1.8] - 2026-02-04
+
+### Fixed
+- **TUI Freeze on Windows**: Resolved GIL blocking issue that caused the TUI to freeze during llama-cpp inference by moving model execution to a separate subprocess.
+- **File Spam Prevention**: Added smart filtering to prevent unwanted file creation during general queries and math problems. Files are now only created when explicitly requested.
+- **Output Persistence**: Implemented automatic `output.json` updates with final answers for every query execution.
+
+## [0.1.7] - 2026-01-26
+
+### Fixed
+- **Memory Optimization**: Reduced default context length from 8192 to 4096 tokens to prevent VRAM saturation and freezing on 4GB GPUs (like RTX 3050).
+- **Windows TUI Rendering**: Fixed "garbage characters" in loading spinner by switching to ASCII-safe animation on Windows.
+
 ## [0.1.6] - 2026-01-26
 
 ### Fixed
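A note on the 0.1.7 memory fix above: the one-line changes to `pipeline.core.yaml`, `pipeline.extended.yaml`, and `pipeline.fast.yaml` in the file list are consistent with halving a context-length setting. As a rough, hypothetical sketch of why that helps — it assumes the configured value ultimately reaches `llama-cpp-python` as `n_ctx`, and the model path and offload policy below are placeholders, not Parishad's actual config wiring:

```python
# Hypothetical sketch: halving n_ctx roughly halves the KV-cache
# allocation that saturates a small GPU's VRAM.
from llama_cpp import Llama

llm = Llama(
    model_path="model.gguf",  # placeholder path
    n_ctx=4096,               # was 8192 before 0.1.7; KV cache scales with n_ctx
    n_gpu_layers=-1,          # placeholder policy: offload all layers
)
out = llm("Q: What is 2 + 2? A:", max_tokens=8)
print(out["choices"][0]["text"])
```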
{parishad-0.1.6 → parishad-0.1.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: parishad
-Version: 0.1.6
+Version: 0.1.8
 Summary: A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness
 Project-URL: Homepage, https://github.com/parishad-council/parishad
 Project-URL: Documentation, https://github.com/parishad-council/parishad#readme
parishad-0.1.8/docs/TUI_FREEZE_WINDOWS.md

@@ -0,0 +1,143 @@
+# TUI Freeze Issue on Windows - Technical Documentation
+
+> **Status: RESOLVED** ✅
+> **Solution: Subprocess-based inference isolation**
+
+## Problem Summary
+
+The Parishad TUI froze on Windows during LLM inference when using `llama-cpp-python`. The UI would become completely unresponsive even though model inference completed successfully in the background.
+
+### Symptoms
+- TUI freezes immediately after submitting a query
+- Model inference completes (GPU usage spikes then drops)
+- Output files are generated correctly
+- TUI remains frozen until force-quit
+- **Works fine on macOS, only affects Windows**
+
+---
+
+## Root Cause
+
+### Python's Global Interpreter Lock (GIL)
+
+The `llama-cpp-python` library holds Python's GIL during C-level inference operations. This blocks **all Python threads**, including Textual's event loop.
+
+Even when inference runs in a separate thread:
+1. Main thread (Textual TUI) waits for timer callbacks
+2. Worker thread (llama-cpp) acquires GIL for C extension
+3. C extension doesn't release GIL during computation
+4. Main thread cannot execute any Python code
+5. **TUI freezes completely**
+
+### Proof
+
+Debug logging showed a 31-second gap where the TUI's timer stopped firing entirely:
+
+```
+[16:04:33.134] POLL: Timer fired...   ← LAST TIMER CALLBACK
+
+    ⚠️ 31 SECONDS - NO TIMER CALLBACKS ⚠️
+
+[16:05:04.329] === INFERENCE COMPLETE ===
+```
+
+This proves the GIL was held continuously during inference.
+
+---
+
+## Solution: Process Isolation
+
+Since threads share the GIL, the only solution is **true process isolation** using `subprocess.Popen`.
+
+### Architecture
+
+```
+┌─────────────────────────┐     FILE IPC     ┌─────────────────────────┐
+│      MAIN PROCESS       │ ←──────────────→ │       SUBPROCESS        │
+│    (TUI - Textual)      │                  │      (Inference)        │
+│                         │                  │                         │
+│ • Timers work ✓         │   query.txt →    │ • Loads Parishad        │
+│ • UI responsive ✓       │  ← result.json   │ • Runs council.run()    │
+│ • Polls every 500ms     │                  │ • Saves JSON result     │
+│                         │                  │                         │
+│     OWN GIL (free)      │                  │     OWN GIL (busy)      │
+└─────────────────────────┘                  └─────────────────────────┘
+```
+
+### Implementation
+
+1. Save query to `~/.parishad/temp_query.txt`
+2. Generate Python script that imports Parishad and runs inference
+3. Launch subprocess with `subprocess.Popen()` (hidden window on Windows)
+4. Poll for `temp_result.json` every 500ms
+5. Display result when file appears
+
+### Key Code
+
+```python
+# Launch subprocess - SEPARATE PROCESS = SEPARATE GIL
+self._subprocess = subprocess.Popen(
+    [python_exe, str(script_file)],
+    stdout=subprocess.PIPE,
+    stderr=subprocess.PIPE,
+    startupinfo=startupinfo,  # Hidden window on Windows
+    cwd=str(self.cwd),
+)
+
+# Poll for result (TUI stays responsive!)
+def poll_subprocess_result():
+    if result_file.exists():
+        result = json.loads(result_file.read_text())
+        display_result(result)
+    else:
+        self.set_timer(0.5, poll_subprocess_result)
+```
+
+---
+
+## Tradeoffs
+
+| Aspect | Before (Threading) | After (Subprocess) |
+|--------|-------------------|-------------------|
+| TUI responsiveness | **FROZEN** | **RESPONSIVE** |
+| GIL blocking | Yes | No |
+| Model reload per query | No | Yes (~10-15s) |
+| Memory usage | Shared | Doubled |
+| IPC complexity | Low | Medium (file-based) |
+
+The model reload overhead is acceptable because the TUI now remains usable during inference.
+
+---
+
+## Files Modified
+
+- `src/parishad/cli/code.py` - Replaced threading with subprocess-based inference
+
+---
+
+## Testing
+
+1. Run `parishad code`
+2. Submit any query
+3. Verify TUI remains responsive (can scroll, etc.)
+4. Result appears after inference completes
+
+---
+
+## Related Issues
+
+This is a known pattern in LLM TUI applications:
+
+- `anomalyco/opencode` - Issues #9269, #8229
+- `ggerganov/llama.cpp` - Issues #3135, #1793
+- `Textualize/textual` - Issues #2167, #4552
+
+All report similar symptoms: TUI freezes during LLM inference on Windows due to GIL contention.
+
+---
+
+## Future Improvements
+
+1. **Persistent subprocess** - Keep model loaded between queries
+2. **Named pipes** - Faster IPC than file polling
+3. **Model caching** - Reduce reload time
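The "Key Code" excerpt in the document above depends on surrounding TUI state (`self`, `python_exe`, `result_file`). For readers who want to see the whole round trip, here is a self-contained sketch of the same subprocess-isolation pattern — not Parishad's actual implementation: the file names are hypothetical, a `time.sleep` stands in for GIL-holding inference, and a plain polling loop replaces Textual's `set_timer`:

```python
# Sketch of the pattern documented above; the real code is in src/parishad/cli/code.py.
import json
import subprocess
import sys
import tempfile
import time
from pathlib import Path

workdir = Path(tempfile.mkdtemp())
query_file = workdir / "temp_query.txt"    # hypothetical names; Parishad uses ~/.parishad/
result_file = workdir / "temp_result.json"
script_file = workdir / "worker.py"

# 1. Save the query for the worker to pick up.
query_file.write_text("What is the capital of France?", encoding="utf-8")

# 2. Generate the worker script. Parishad's version imports the council and
#    runs inference; here a sleep stands in for llama-cpp holding its own GIL.
script_file.write_text(f"""
import json, time
from pathlib import Path
query = Path(r"{query_file}").read_text(encoding="utf-8")
time.sleep(2)  # placeholder for inference
Path(r"{result_file}").write_text(json.dumps({{"answer": "Paris", "query": query}}))
""")

# 3. Launch the worker in its own process: separate interpreter, separate GIL.
startupinfo = None
if sys.platform == "win32":
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    startupinfo.wShowWindow = subprocess.SW_HIDE  # hidden console window
proc = subprocess.Popen(
    [sys.executable, str(script_file)],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    startupinfo=startupinfo,
)

# 4. Poll for the result file; the parent's GIL stays free here, so a TUI's
#    timers keep firing where the thread-based design froze.
while not result_file.exists():
    time.sleep(0.5)

# 5. Display the result once the file appears.
print(json.loads(result_file.read_text(encoding="utf-8")))
proc.wait()
```

The document's Future Improvements would replace steps 2-4 with a long-lived worker fed over stdin or a named pipe, so the model loads once rather than on every query.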
{parishad-0.1.6 → parishad-0.1.8}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "parishad"
-version = "0.1.6"
+version = "0.1.8"
 description = "A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness"
 readme = "README.md"
 license = "MIT"
parishad-0.1.8/src/main.py

@@ -0,0 +1 @@
+def add_numbers(a, b):
{parishad-0.1.6 → parishad-0.1.8}/src/parishad/__init__.py

@@ -5,7 +5,7 @@ Parishad orchestrates multiple local language models into a structured "council"
 that achieves higher reliability than a single model under strict compute budgets.
 """
 
-__version__ = "0.1.6"
+__version__ = "0.1.7"
 
 from .orchestrator.engine import Parishad, ParishadEngine, PipelineConfig
 from .models.runner import ModelRunner, ModelConfig