parishad 0.1.6__tar.gz → 0.1.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78) hide show
  1. {parishad-0.1.6 → parishad-0.1.8}/.gitignore +3 -0
  2. {parishad-0.1.6 → parishad-0.1.8}/CHANGELOG.md +13 -0
  3. {parishad-0.1.6 → parishad-0.1.8}/PKG-INFO +1 -1
  4. parishad-0.1.8/docs/TUI_FREEZE_WINDOWS.md +143 -0
  5. {parishad-0.1.6 → parishad-0.1.8}/pyproject.toml +1 -1
  6. parishad-0.1.8/src/main.py +1 -0
  7. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/__init__.py +1 -1
  8. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/code.py +608 -5
  9. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/pipeline.core.yaml +1 -1
  10. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/pipeline.extended.yaml +1 -1
  11. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/pipeline.fast.yaml +1 -1
  12. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/data/models.json +1 -1
  13. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/runner.py +3 -3
  14. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/orchestrator/config_loader.py +2 -2
  15. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/orchestrator/engine.py +68 -16
  16. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/sainik.py +6 -2
  17. {parishad-0.1.6 → parishad-0.1.8}/.github/workflows/publish.yml +0 -0
  18. {parishad-0.1.6 → parishad-0.1.8}/CODE_OF_CONDUCT.md +0 -0
  19. {parishad-0.1.6 → parishad-0.1.8}/CONTRIBUTING.md +0 -0
  20. {parishad-0.1.6 → parishad-0.1.8}/LICENSE +0 -0
  21. {parishad-0.1.6 → parishad-0.1.8}/README.md +0 -0
  22. {parishad-0.1.6 → parishad-0.1.8}/SECURITY.md +0 -0
  23. {parishad-0.1.6 → parishad-0.1.8}/docs/assets/logo.jpeg +0 -0
  24. {parishad-0.1.6 → parishad-0.1.8}/docs/assets/logo.svg +0 -0
  25. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/__main__.py +0 -0
  26. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/checker/__init__.py +0 -0
  27. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/checker/deterministic.py +0 -0
  28. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/checker/ensemble.py +0 -0
  29. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/checker/retrieval.py +0 -0
  30. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/__init__.py +0 -0
  31. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/main.py +0 -0
  32. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/prarambh.py +0 -0
  33. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/cli/sthapana.py +0 -0
  34. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/modes.py +0 -0
  35. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/config/user_config.py +0 -0
  36. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/data/catalog.py +0 -0
  37. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/memory/__init__.py +0 -0
  38. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/__init__.py +0 -0
  39. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/__init__.py +0 -0
  40. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/base.py +0 -0
  41. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/huggingface.py +0 -0
  42. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/llama_cpp.py +0 -0
  43. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/mlx_lm.py +0 -0
  44. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/ollama.py +0 -0
  45. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/openai_api.py +0 -0
  46. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/backends/transformers_hf.py +0 -0
  47. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/costs.py +0 -0
  48. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/downloader.py +0 -0
  49. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/optimizations.py +0 -0
  50. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/profiles.py +0 -0
  51. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/reliability.py +0 -0
  52. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/models/tokenization.py +0 -0
  53. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/orchestrator/__init__.py +0 -0
  54. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/orchestrator/exceptions.py +0 -0
  55. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/__init__.py +0 -0
  56. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/base.py +0 -0
  57. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/dandadhyaksha.py +0 -0
  58. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/darbari.py +0 -0
  59. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/majumdar.py +0 -0
  60. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/pantapradhan.py +0 -0
  61. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/prerak.py +0 -0
  62. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/raja.py +0 -0
  63. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/sacheev.py +0 -0
  64. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/sar_senapati.py +0 -0
  65. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/roles/vidushak.py +0 -0
  66. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/__init__.py +0 -0
  67. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/base.py +0 -0
  68. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/fs.py +0 -0
  69. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/perception.py +0 -0
  70. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/retrieval.py +0 -0
  71. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/tools/shell.py +0 -0
  72. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/__init__.py +0 -0
  73. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/hardware.py +0 -0
  74. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/installer.py +0 -0
  75. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/logging.py +0 -0
  76. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/scanner.py +0 -0
  77. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/text.py +0 -0
  78. {parishad-0.1.6 → parishad-0.1.8}/src/parishad/utils/tracing.py +0 -0
@@ -88,6 +88,7 @@ parishad_traces/
88
88
  benchmark_traces/
89
89
  gaia_benchmark.log
90
90
  gaia_results.jsonl
91
+ requirements.txt
91
92
 
92
93
  # API keys and secrets
93
94
  secrets.yaml
@@ -158,3 +159,5 @@ local_config.yaml
158
159
  config.local.yaml
159
160
  pipeline.*.local.yaml
160
161
 
162
+
163
+
@@ -5,6 +5,19 @@ All notable changes to the **Parishad** project will be documented in this file.
5
5
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6
6
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
7
 
8
+ ## [0.1.8] - 2026-02-04
9
+
10
+ ### Fixed
11
+ - **TUI Freeze on Windows**: Resolved GIL blocking issue that caused the TUI to freeze during llama-cpp inference by moving model execution to a separate subprocess.
12
+ - **File Spam Prevention**: Added smart filtering to prevent unwanted file creation during general queries and math problems. Files are now only created when explicitly requested.
13
+ - **Output Persistence**: Implemented automatic `output.json` updates with final answers for every query execution.
14
+
15
+ ## [0.1.7] - 2026-01-26
16
+
17
+ ### Fixed
18
+ - **Memory Optimization**: Reduced default context length from 8192 to 4096 tokens to prevent VRAM saturation and freezing on 4GB GPUs (like RTX 3050).
19
+ - **Windows TUI Rendering**: Fixed "garbage characters" in loading spinner by switching to ASCII-safe animation on Windows.
20
+
8
21
  ## [0.1.6] - 2026-01-26
9
22
 
10
23
  ### Fixed
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: parishad
3
- Version: 0.1.6
3
+ Version: 0.1.8
4
4
  Summary: A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness
5
5
  Project-URL: Homepage, https://github.com/parishad-council/parishad
6
6
  Project-URL: Documentation, https://github.com/parishad-council/parishad#readme
@@ -0,0 +1,143 @@
1
+ # TUI Freeze Issue on Windows - Technical Documentation
2
+
3
+ > **Status: RESOLVED** ✅
4
+ > **Solution: Subprocess-based inference isolation**
5
+
6
+ ## Problem Summary
7
+
8
+ The Parishad TUI froze on Windows during LLM inference when using `llama-cpp-python`. The UI would become completely unresponsive even though model inference completed successfully in the background.
9
+
10
+ ### Symptoms
11
+ - TUI freezes immediately after submitting a query
12
+ - Model inference completes (GPU usage spikes then drops)
13
+ - Output files are generated correctly
14
+ - TUI remains frozen until force-quit
15
+ - **Works fine on macOS, only affects Windows**
16
+
17
+ ---
18
+
19
+ ## Root Cause
20
+
21
+ ### Python's Global Interpreter Lock (GIL)
22
+
23
+ The `llama-cpp-python` library holds Python's GIL during C-level inference operations. This blocks **all Python threads**, including Textual's event loop.
24
+
25
+ Even when inference runs in a separate thread:
26
+ 1. Main thread (Textual TUI) waits for timer callbacks
27
+ 2. Worker thread (llama-cpp) acquires GIL for C extension
28
+ 3. C extension doesn't release GIL during computation
29
+ 4. Main thread cannot execute any Python code
30
+ 5. **TUI freezes completely**
31
+
32
+ ### Proof
33
+
34
+ Debug logging showed a 31-second gap where the TUI's timer stopped firing entirely:
35
+
36
+ ```
37
+ [16:04:33.134] POLL: Timer fired... ← LAST TIMER CALLBACK
38
+
39
+ ⚠️ 31 SECONDS - NO TIMER CALLBACKS ⚠️
40
+
41
+ [16:05:04.329] === INFERENCE COMPLETE ===
42
+ ```
43
+
44
+ This proves the GIL was held continuously during inference.
45
+
46
+ ---
47
+
48
+ ## Solution: Process Isolation
49
+
50
+ Since threads share the GIL, the only solution is **true process isolation** using `subprocess.Popen`.
51
+
52
+ ### Architecture
53
+
54
+ ```
55
+ ┌─────────────────────────┐ FILE IPC ┌─────────────────────────┐
56
+ │ MAIN PROCESS │ ←─────────────→ │ SUBPROCESS │
57
+ │ (TUI - Textual) │ │ (Inference) │
58
+ │ │ │ │
59
+ │ • Timers work ✓ │ query.txt → │ • Loads Parishad │
60
+ │ • UI responsive ✓ │ ← result.json │ • Runs council.run() │
61
+ │ • Polls every 500ms │ │ • Saves JSON result │
62
+ │ │ │ │
63
+ │ OWN GIL (free) │ │ OWN GIL (busy) │
64
+ └─────────────────────────┘ └─────────────────────────┘
65
+ ```
66
+
67
+ ### Implementation
68
+
69
+ 1. Save query to `~/.parishad/temp_query.txt`
70
+ 2. Generate Python script that imports Parishad and runs inference
71
+ 3. Launch subprocess with `subprocess.Popen()` (hidden window on Windows)
72
+ 4. Poll for `temp_result.json` every 500ms
73
+ 5. Display result when file appears
74
+
75
+ ### Key Code
76
+
77
+ ```python
78
+ # Launch subprocess - SEPARATE PROCESS = SEPARATE GIL
79
+ self._subprocess = subprocess.Popen(
80
+ [python_exe, str(script_file)],
81
+ stdout=subprocess.PIPE,
82
+ stderr=subprocess.PIPE,
83
+ startupinfo=startupinfo, # Hidden window on Windows
84
+ cwd=str(self.cwd),
85
+ )
86
+
87
+ # Poll for result (TUI stays responsive!)
88
+ def poll_subprocess_result():
89
+ if result_file.exists():
90
+ result = json.loads(result_file.read_text())
91
+ display_result(result)
92
+ else:
93
+ self.set_timer(0.5, poll_subprocess_result)
94
+ ```
95
+
96
+ ---
97
+
98
+ ## Tradeoffs
99
+
100
+ | Aspect | Before (Threading) | After (Subprocess) |
101
+ |--------|-------------------|-------------------|
102
+ | TUI responsiveness | **FROZEN** | **RESPONSIVE** |
103
+ | GIL blocking | Yes | No |
104
+ | Model reload per query | No | Yes (~10-15s) |
105
+ | Memory usage | Shared | Doubled |
106
+ | IPC complexity | Low | Medium (file-based) |
107
+
108
+ The model reload overhead is acceptable because the TUI now remains usable during inference.
109
+
110
+ ---
111
+
112
+ ## Files Modified
113
+
114
+ - `src/parishad/cli/code.py` - Replaced threading with subprocess-based inference
115
+
116
+ ---
117
+
118
+ ## Testing
119
+
120
+ 1. Run `parishad code`
121
+ 2. Submit any query
122
+ 3. Verify TUI remains responsive (can scroll, etc.)
123
+ 4. Result appears after inference completes
124
+
125
+ ---
126
+
127
+ ## Related Issues
128
+
129
+ This is a known pattern in LLM TUI applications:
130
+
131
+ - `anomalyco/opencode` - Issues #9269, #8229
132
+ - `ggerganov/llama.cpp` - Issues #3135, #1793
133
+ - `Textualize/textual` - Issues #2167, #4552
134
+
135
+ All report similar symptoms: TUI freezes during LLM inference on Windows due to GIL contention.
136
+
137
+ ---
138
+
139
+ ## Future Improvements
140
+
141
+ 1. **Persistent subprocess** - Keep model loaded between queries
142
+ 2. **Named pipes** - Faster IPC than file polling
143
+ 3. **Model caching** - Reduce reload time
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "parishad"
7
- version = "0.1.6"
7
+ version = "0.1.8"
8
8
  description = "A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness"
9
9
  readme = "README.md"
10
10
  license = "MIT"
@@ -0,0 +1 @@
1
+ def add_numbers(a, b):
@@ -5,7 +5,7 @@ Parishad orchestrates multiple local language models into a structured "council"
5
5
  that achieves higher reliability than a single model under strict compute budgets.
6
6
  """
7
7
 
8
- __version__ = "0.1.6"
8
+ __version__ = "0.1.8"
9
9
 
10
10
  from .orchestrator.engine import Parishad, ParishadEngine, PipelineConfig
11
11
  from .models.runner import ModelRunner, ModelConfig