parishad 0.1.3.tar.gz → 0.1.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {parishad-0.1.3 → parishad-0.1.5}/CHANGELOG.md +11 -0
- {parishad-0.1.3 → parishad-0.1.5}/PKG-INFO +3 -2
- {parishad-0.1.3 → parishad-0.1.5}/pyproject.toml +2 -2
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/__init__.py +1 -1
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/cli/main.py +5 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/config/pipeline.core.yaml +1 -1
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/config/pipeline.extended.yaml +1 -1
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/config/pipeline.fast.yaml +1 -1
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/data/models.json +1 -1
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/runner.py +14 -4
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/orchestrator/config_loader.py +2 -2
- parishad-0.1.5/src/parishad/utils/installer.py +86 -0
- {parishad-0.1.3 → parishad-0.1.5}/.github/workflows/publish.yml +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/.gitignore +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/CODE_OF_CONDUCT.md +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/CONTRIBUTING.md +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/LICENSE +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/README.md +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/SECURITY.md +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/docs/assets/logo.jpeg +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/docs/assets/logo.svg +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/__main__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/checker/__init__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/checker/deterministic.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/checker/ensemble.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/checker/retrieval.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/cli/__init__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/cli/code.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/cli/prarambh.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/cli/sthapana.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/config/modes.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/config/user_config.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/data/catalog.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/memory/__init__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/__init__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/backends/__init__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/backends/base.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/backends/huggingface.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/backends/llama_cpp.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/backends/mlx_lm.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/backends/ollama.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/backends/openai_api.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/backends/transformers_hf.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/costs.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/downloader.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/optimizations.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/profiles.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/reliability.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/models/tokenization.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/orchestrator/__init__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/orchestrator/engine.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/orchestrator/exceptions.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/__init__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/base.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/dandadhyaksha.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/darbari.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/majumdar.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/pantapradhan.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/prerak.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/raja.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/sacheev.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/sainik.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/sar_senapati.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/roles/vidushak.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/tools/__init__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/tools/base.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/tools/fs.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/tools/perception.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/tools/retrieval.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/tools/shell.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/utils/__init__.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/utils/hardware.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/utils/logging.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/utils/scanner.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/utils/text.py +0 -0
- {parishad-0.1.3 → parishad-0.1.5}/src/parishad/utils/tracing.py +0 -0
--- parishad-0.1.3/CHANGELOG.md
+++ parishad-0.1.5/CHANGELOG.md
@@ -5,6 +5,17 @@ All notable changes to the **Parishad** project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.1.5] - 2026-01-26
+
+### Added
+- **Runtime Auto-Installer**: Integrated automatic detection and installation of `llama-cpp-python` wheels on Windows/Mac/Linux. Running `parishad` now fixes missing backend dependencies automatically.
+
+## [0.1.4] - 2026-01-26
+
+### Fixed
+- **Windows Compilation Spam**: Reverted `llama-cpp-python` to optional dependency to prevent pip build failures on Windows.
+- **Helpful Errors**: Added smart detection for missing backend on Windows, providing direct `pip install` commands for pre-built wheels.
+
 ## [0.1.3] - 2026-01-26
 
 ### Fixed
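The 0.1.5 entry above boils down to a detect-then-install pattern. A minimal sketch of that pattern follows; the real implementation is the `installer.py` diff later in this document, and `ensure_backend` with its defaults is an illustrative name, not package API:

```python
# Detect-then-install: probe for the module, install the package if absent.
import importlib.util
import subprocess
import sys

def ensure_backend(module: str = "llama_cpp", package: str = "llama-cpp-python") -> None:
    """Install `package` via the running interpreter's pip when `module` is missing."""
    if importlib.util.find_spec(module) is not None:
        return  # already importable, nothing to do
    subprocess.check_call([sys.executable, "-m", "pip", "install", package])
```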
--- parishad-0.1.3/PKG-INFO
+++ parishad-0.1.5/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: parishad
-Version: 0.1.3
+Version: 0.1.5
 Summary: A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness
 Project-URL: Homepage, https://github.com/parishad-council/parishad
 Project-URL: Documentation, https://github.com/parishad-council/parishad#readme
@@ -23,7 +23,6 @@ Requires-Python: >=3.10
 Requires-Dist: click>=8.0.0
 Requires-Dist: httpx>=0.25.0
 Requires-Dist: jsonschema>=4.0.0
-Requires-Dist: llama-cpp-python>=0.2.0
 Requires-Dist: numpy<2.0
 Requires-Dist: openai>=1.0.0
 Requires-Dist: psutil>=5.9.0
@@ -39,6 +38,7 @@ Requires-Dist: black>=23.0.0; extra == 'all'
 Requires-Dist: chromadb>=0.5.0; extra == 'all'
 Requires-Dist: datasets>=2.14.0; extra == 'all'
 Requires-Dist: faiss-cpu>=1.7.0; extra == 'all'
+Requires-Dist: llama-cpp-python>=0.2.0; extra == 'all'
 Requires-Dist: mypy>=1.0.0; extra == 'all'
 Requires-Dist: pandas>=2.0.0; extra == 'all'
 Requires-Dist: pre-commit>=3.0.0; extra == 'all'
@@ -71,6 +71,7 @@ Requires-Dist: grpcio-tools>=1.50.0; extra == 'distributed'
 Requires-Dist: grpcio>=1.50.0; extra == 'distributed'
 Provides-Extra: local
 Requires-Dist: accelerate>=0.25.0; extra == 'local'
+Requires-Dist: llama-cpp-python>=0.2.0; extra == 'local'
 Requires-Dist: torch>=2.0.0; extra == 'local'
 Requires-Dist: transformers>=4.35.0; extra == 'local'
 Provides-Extra: mlx
--- parishad-0.1.3/pyproject.toml
+++ parishad-0.1.5/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "parishad"
-version = "0.1.3"
+version = "0.1.5"
 description = "A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness"
 readme = "README.md"
 license = "MIT"
@@ -45,11 +45,11 @@ dependencies = [
     "numpy<2.0",
     "textual>=0.70.0",
     "psutil>=5.9.0",
-    "llama-cpp-python>=0.2.0",
 ]
 
 [project.optional-dependencies]
 local = [
+    "llama-cpp-python>=0.2.0",
     "transformers>=4.35.0",
     "torch>=2.0.0",
     "accelerate>=0.25.0",
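Moving `llama-cpp-python` out of core `dependencies` into the `local` extra means a bare `pip install parishad` no longer attempts a source build on Windows; users opt back in with `pip install "parishad[local]"` (the `all` extra also pulls it in, per the PKG-INFO hunk above). A minimal consumer-side guard for the now-optional module, assuming only the import name `llama_cpp`:

```python
# Optional-dependency guard: probe before importing the extra.
import importlib.util

if importlib.util.find_spec("llama_cpp") is None:
    raise SystemExit('llama-cpp-python missing; run: pip install "parishad[local]"')

from llama_cpp import Llama  # safe once the guard has passed
```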
--- parishad-0.1.3/src/parishad/__init__.py
+++ parishad-0.1.5/src/parishad/__init__.py
@@ -5,7 +5,7 @@ Parishad orchestrates multiple local language models into a structured "council"
 that achieves higher reliability than a single model under strict compute budgets.
 """
 
-__version__ = "0.1.3"
+__version__ = "0.1.5"
 
 from .orchestrator.engine import Parishad, ParishadEngine, PipelineConfig
 from .models.runner import ModelRunner, ModelConfig
--- parishad-0.1.3/src/parishad/cli/main.py
+++ parishad-0.1.5/src/parishad/cli/main.py
@@ -189,6 +189,11 @@ def cli(ctx):
     config - View or modify configuration
     sthapana - स्थापना (Setup) - Configure your Parishad Sabha
     """
+    if ctx.invoked_subcommand != "init":
+        # Check backend installation (skip for init/config commands if needed, but safer to just check)
+        from ..utils.installer import check_and_install_backend
+        check_and_install_backend()
+
     if ctx.invoked_subcommand is None:
         # First run - ask for permissions
         if is_first_run():
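Because `check_and_install_backend()` returns immediately when `importlib.util.find_spec("llama_cpp")` succeeds, the guard adds negligible latency to everyday invocations. A standalone sketch of the same Click pattern, with a toy group standing in for the real entry point:

```python
# Toy Click group reproducing the guard above; `cli` here is illustrative,
# not the actual parishad entry point.
import click

@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
    if ctx.invoked_subcommand != "init":
        # Cheap when the backend is present: find_spec() short-circuits.
        from parishad.utils.installer import check_and_install_backend
        check_and_install_backend()
    if ctx.invoked_subcommand is None:
        click.echo(cli.get_help(ctx))

if __name__ == "__main__":
    cli()
```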
--- parishad-0.1.3/src/parishad/models/runner.py
+++ parishad-0.1.5/src/parishad/models/runner.py
@@ -307,10 +307,20 @@ def _create_backend(backend_type: Backend | str) -> BackendProtocol:
 
     if backend_name == "llama_cpp":
         if not is_backend_available("llama_cpp"):
-
-
-
-            )
+            import platform
+            msg = "llama-cpp-python is not installed."
+
+            if platform.system() == "Windows":
+                msg += (
+                    "\n\n❌ compilation failed likely due to missing MSVC/CUDA."
+                    "\n💡 SOLUTION: Install a pre-built wheel for Windows:"
+                    "\n   pip install llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121"
+                    "\n   (Select the cpu or cuda version matching your system)"
+                )
+            else:
+                msg += "\nInstall with: pip install llama-cpp-python"
+
+            raise BackendNotAvailableError(msg)
         return LlamaCppBackend()
 
     if backend_name == "openai":
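Since the 0.1.5 message embeds the remediation command (including the pre-built wheel index on Windows), callers only need to surface the exception text. A hypothetical caller-side sketch; the diff does not show where `BackendNotAvailableError` is defined, so the import path below is an assumption:

```python
# Assumed import path: both names appear in src/parishad/models/runner.py,
# but their public location is not confirmed by this diff.
from parishad.models.runner import BackendNotAvailableError, _create_backend

try:
    backend = _create_backend("llama_cpp")
except BackendNotAvailableError as exc:
    print(exc)  # on Windows this already includes the exact pip command to run
```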
--- parishad-0.1.3/src/parishad/orchestrator/config_loader.py
+++ parishad-0.1.5/src/parishad/orchestrator/config_loader.py
@@ -21,7 +21,7 @@ class RoleSpec:
     name: str
     class_name: str
     slot: str
-    version: str = "0.1.3"
+    version: str = "0.1.5"
     budget_tokens: int = 1000
     dependencies: list[str] = field(default_factory=list)
     max_tokens: Optional[int] = None
@@ -110,7 +110,7 @@ def load_pipeline_config(name: str, config_dir: Optional[Path] = None) -> list[RoleSpec]:
             name=role_name.lower(),  # Always store as lowercase for consistent lookups
             class_name=role_config.get("class", role_name.capitalize()),
             slot=role_config.get("slot", "mid"),
-            version=role_config.get("version", "0.1.3"),
+            version=role_config.get("version", "0.1.5"),
             budget_tokens=role_config.get("budget_tokens", 1000),
             dependencies=role_config.get("dependencies", []),
             max_tokens=role_config.get("max_tokens"),
--- /dev/null
+++ parishad-0.1.5/src/parishad/utils/installer.py
@@ -0,0 +1,86 @@
+
+import subprocess
+import sys
+import platform
+import os
+import importlib.util
+from rich.console import Console
+
+console = Console()
+
+def check_and_install_backend():
+    """
+    Check if llama-cpp-python is installed.
+    If not, automatically install it using pre-built wheels or optimal settings.
+    """
+    if importlib.util.find_spec("llama_cpp") is not None:
+        return
+
+    console.print("\n[bold yellow]⚠️ Core backend (llama-cpp-python) is missing.[/bold yellow]")
+    console.print("[dim]Parishad needs this to run local models.[/dim]")
+
+    system = platform.system()
+
+    # 1. Windows: The main pain point. Use pre-built wheels.
+    if system == "Windows":
+        console.print("\n[cyan]🪟 Windows detected. Scanning for NVIDIA GPU...[/cyan]")
+
+        use_cuda = False
+        try:
+            # Simple check for nvcc
+            subprocess.run(["nvcc", "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            console.print("[green]✓ CUDA Toolkit detected (nvcc found).[/green]")
+            use_cuda = True
+        except (subprocess.CalledProcessError, FileNotFoundError):
+            console.print("[yellow]! CUDA not found. Defaulting to CPU.[/yellow]")
+            console.print("[dim](If you have an NVIDIA GPU, install CUDA Toolkit 12.x for 10x speed)[/dim]")
+
+        console.print(f"\n[bold green]🚀 Auto-installing optimized backend for {'CUDA 12.x' if use_cuda else 'CPU'}...[/bold green]")
+        console.print("[dim]This may take a minute...[/dim]\n")
+
+        cmd = [sys.executable, "-m", "pip", "install", "llama-cpp-python"]
+        cmd.extend(["--prefer-binary", "--extra-index-url"])
+
+        if use_cuda:
+            # Use cu124 wheels (compatible with most modern 12.x)
+            cmd.append("https://abetlen.github.io/llama-cpp-python/whl/cu124")
+        else:
+            cmd.append("https://abetlen.github.io/llama-cpp-python/whl/cpu")
+
+        try:
+            subprocess.check_call(cmd)
+            console.print("\n[bold green]✓ Backend installed successfully![/bold green]")
+        except subprocess.CalledProcessError:
+            console.print("\n[bold red]❌ Installation failed.[/bold red]")
+            console.print("Please copy-paste this command manually:")
+            console.print(f"  {' '.join(cmd)}")
+            sys.exit(1)
+
+    # 2. Mac: Enable Metal
+    elif system == "Darwin":
+        console.print("\n[cyan]🍎 Mac detected. Installing with Metal (GPU) support...[/cyan]")
+
+        env = os.environ.copy()
+        env["CMAKE_ARGS"] = "-DGGML_METAL=on"
+
+        cmd = [sys.executable, "-m", "pip", "install", "llama-cpp-python"]
+
+        try:
+            subprocess.check_call(cmd, env=env)
+            console.print("\n[bold green]✓ Backend installed successfully![/bold green]")
+        except subprocess.CalledProcessError:
+            console.print("\n[bold red]❌ Installation failed.[/bold red]")
+            sys.exit(1)
+
+    # 3. Linux: Standard install
+    else:
+        console.print("\n[cyan]🐧 Linux detected. Installing from PyPI...[/cyan]")
+        cmd = [sys.executable, "-m", "pip", "install", "llama-cpp-python"]
+        try:
+            subprocess.check_call(cmd)
+            console.print("\n[bold green]✓ Backend installed successfully![/bold green]")
+        except subprocess.CalledProcessError:
+            sys.exit(1)
+
+    # clear some space
+    print()
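The installer is wired into the CLI (see the `cli/main.py` hunk above), but nothing prevents calling it directly. A short usage sketch follows; the platform split is the design point: Windows installs from the cu124/cpu wheel indexes to avoid MSVC builds, macOS compiles with `CMAKE_ARGS=-DGGML_METAL=on` for Metal, and Linux takes the plain PyPI path.

```python
# Direct invocation: a no-op if llama_cpp is already importable, otherwise
# it shells out to pip exactly as shown in the diff above.
from parishad.utils.installer import check_and_install_backend

check_and_install_backend()
```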
All other files (the `+0 -0` entries in the file list above) are unchanged between 0.1.3 and 0.1.5.