parishad 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parishad/__init__.py +70 -0
- parishad/__main__.py +10 -0
- parishad/checker/__init__.py +25 -0
- parishad/checker/deterministic.py +644 -0
- parishad/checker/ensemble.py +496 -0
- parishad/checker/retrieval.py +546 -0
- parishad/cli/__init__.py +6 -0
- parishad/cli/code.py +3254 -0
- parishad/cli/main.py +1158 -0
- parishad/cli/prarambh.py +99 -0
- parishad/cli/sthapana.py +368 -0
- parishad/config/modes.py +139 -0
- parishad/config/pipeline.core.yaml +128 -0
- parishad/config/pipeline.extended.yaml +172 -0
- parishad/config/pipeline.fast.yaml +89 -0
- parishad/config/user_config.py +115 -0
- parishad/data/catalog.py +118 -0
- parishad/data/models.json +108 -0
- parishad/memory/__init__.py +79 -0
- parishad/models/__init__.py +181 -0
- parishad/models/backends/__init__.py +247 -0
- parishad/models/backends/base.py +211 -0
- parishad/models/backends/huggingface.py +318 -0
- parishad/models/backends/llama_cpp.py +239 -0
- parishad/models/backends/mlx_lm.py +141 -0
- parishad/models/backends/ollama.py +253 -0
- parishad/models/backends/openai_api.py +193 -0
- parishad/models/backends/transformers_hf.py +198 -0
- parishad/models/costs.py +385 -0
- parishad/models/downloader.py +1557 -0
- parishad/models/optimizations.py +871 -0
- parishad/models/profiles.py +610 -0
- parishad/models/reliability.py +876 -0
- parishad/models/runner.py +651 -0
- parishad/models/tokenization.py +287 -0
- parishad/orchestrator/__init__.py +24 -0
- parishad/orchestrator/config_loader.py +210 -0
- parishad/orchestrator/engine.py +1113 -0
- parishad/orchestrator/exceptions.py +14 -0
- parishad/roles/__init__.py +71 -0
- parishad/roles/base.py +712 -0
- parishad/roles/dandadhyaksha.py +163 -0
- parishad/roles/darbari.py +246 -0
- parishad/roles/majumdar.py +274 -0
- parishad/roles/pantapradhan.py +150 -0
- parishad/roles/prerak.py +357 -0
- parishad/roles/raja.py +345 -0
- parishad/roles/sacheev.py +203 -0
- parishad/roles/sainik.py +427 -0
- parishad/roles/sar_senapati.py +164 -0
- parishad/roles/vidushak.py +69 -0
- parishad/tools/__init__.py +7 -0
- parishad/tools/base.py +57 -0
- parishad/tools/fs.py +110 -0
- parishad/tools/perception.py +96 -0
- parishad/tools/retrieval.py +74 -0
- parishad/tools/shell.py +103 -0
- parishad/utils/__init__.py +7 -0
- parishad/utils/hardware.py +122 -0
- parishad/utils/logging.py +79 -0
- parishad/utils/scanner.py +164 -0
- parishad/utils/text.py +61 -0
- parishad/utils/tracing.py +133 -0
- parishad-0.1.0.dist-info/METADATA +256 -0
- parishad-0.1.0.dist-info/RECORD +68 -0
- parishad-0.1.0.dist-info/WHEEL +4 -0
- parishad-0.1.0.dist-info/entry_points.txt +2 -0
- parishad-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Smart Model Scanner for Parishad.
|
|
3
|
+
Scans standard locations for existing LLMs to avoid redundant downloads.
|
|
4
|
+
"""
|
|
5
|
+
import shutil
|
|
6
|
+
import subprocess
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import List, Dict, Any
|
|
11
|
+
from dataclasses import dataclass
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
@dataclass
class FoundModel:
    """A model discovered on the local machine by one of the scanners."""

    name: str             # model identifier, e.g. "llama3" or "org/repo"
    source: str           # "ollama", "huggingface", "lmstudio"
    path: str = ""        # filesystem location, when known
    size_gb: float = 0.0  # on-disk size in gigabytes (0.0 when unknown)

    def __str__(self):
        details = f"{self.source}, {self.size_gb:.1f}GB"
        return f"{self.name} ({details})"
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class ModelScanner:
    """Scans the system for local LLMs (Ollama, Hugging Face cache, ad-hoc files)."""

    def scan_all(self) -> List[FoundModel]:
        """Run all automatic scans and return combined results."""
        models: List[FoundModel] = []
        models.extend(self.scan_ollama())
        models.extend(self.scan_huggingface())
        return models

    def scan_ollama(self) -> List[FoundModel]:
        """Check for Ollama models via the ``ollama list`` CLI.

        Returns:
            Discovered models, or an empty list when the binary is missing,
            the command fails, times out, or produces no model rows.
        """
        models: List[FoundModel] = []
        if not shutil.which("ollama"):
            return []

        try:
            result = subprocess.run(
                ["ollama", "list"],
                capture_output=True,
                text=True,
                timeout=5,
            )
            if result.returncode != 0:
                return []

            # First line is the column header; everything after is a model row.
            lines = result.stdout.strip().split('\n')
            if len(lines) < 2:
                return []

            for line in lines[1:]:
                parts = line.split()
                if not parts:
                    continue
                name = parts[0]

                # Parse size (handles "2.0 GB", "2.0GB" and "512 MB");
                # falls back to 0.0 when the column is absent or malformed.
                # (Fixed: the original assigned size_gb = 0.0 twice.)
                size_gb = 0.0
                try:
                    # Case 1: "2.0 GB" (separate tokens)
                    if len(parts) >= 4 and parts[3] == 'GB':
                        size_gb = float(parts[2])
                    # Case 2: "2.0GB" (merged token)
                    elif len(parts) >= 3 and 'GB' in parts[2]:
                        size_gb = float(parts[2].replace('GB', ''))
                    # Case 3: MB support
                    elif len(parts) >= 4 and parts[3] == 'MB':
                        size_gb = float(parts[2]) / 1024
                except ValueError:
                    pass

                models.append(FoundModel(
                    name=name,
                    source="ollama",
                    path=str(Path.home() / ".ollama" / "models"),
                    size_gb=size_gb,
                ))

        except Exception as e:
            logger.warning(f"Ollama scan failed: {e}")

        return models

    def scan_huggingface(self) -> List[FoundModel]:
        """Scan ~/.cache/huggingface/hub for cached model snapshots."""
        models: List[FoundModel] = []
        cache_dir = Path.home() / ".cache" / "huggingface" / "hub"

        if not cache_dir.exists():
            return []

        try:
            for item in cache_dir.iterdir():
                if item.is_dir() and item.name.startswith("models--"):
                    # Cache dirs are named "models--org--modelname";
                    # convert back to the canonical "org/modelname" form.
                    raw_name = item.name.replace("models--", "")
                    clean_name = raw_name.replace("--", "/")

                    # Total on-disk size of every file in the snapshot.
                    size_gb = sum(
                        f.stat().st_size for f in item.rglob('*') if f.is_file()
                    ) / (1024 ** 3)

                    models.append(FoundModel(
                        name=clean_name,
                        source="huggingface",
                        path=str(item),
                        size_gb=size_gb,
                    ))
        except Exception as e:
            logger.warning(f"HF scan failed: {e}")

        return models

    def scan_directory(self, root_path: Path, min_size_gb: float = 0.5) -> List[FoundModel]:
        """
        Deep scan a directory recursively for large model files.
        Looks for .gguf, .bin, .safetensors (plus .pt/.pth) > min_size_gb.
        """
        models: List[FoundModel] = []
        extensions = {'.gguf', '.bin', '.safetensors', '.pt', '.pth'}

        # Skip these common potentially huge/slow dirs
        skip_dirs = {'.git', '.vscode', '.idea', '__pycache__', 'node_modules', 'Library', 'AppData'}

        if not root_path.exists():
            return []

        try:
            for path in root_path.rglob('*'):
                if not path.is_file() or path.suffix.lower() not in extensions:
                    continue

                # BUGFIX: Path.rglob does not prune subtrees, so the original
                # `continue` on a directory entry never excluded the files
                # beneath it. Check every ancestor component of the file
                # (relative to the scan root) instead.
                ancestor_dirs = path.relative_to(root_path).parts[:-1]
                if any(part in skip_dirs or part.startswith('.') for part in ancestor_dirs):
                    continue

                try:
                    size_gb = path.stat().st_size / (1024 ** 3)
                    if size_gb >= min_size_gb:
                        # Found a potential model!
                        models.append(FoundModel(
                            name=path.name,
                            source="local_file",
                            path=str(path),
                            size_gb=size_gb,
                        ))
                except (PermissionError, OSError):
                    # Unreadable file: skip it rather than abort the scan.
                    continue

        except Exception as e:
            logger.warning(f"Deep scan error at {root_path}: {e}")

        return models
parishad/utils/text.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
"""Text processing utilities for Parishad."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def truncate_text(
    text: str,
    max_chars: int,
    suffix: str = "... [TRUNCATED]"
) -> str:
    """
    Cap *text* at ``max_chars`` characters, appending *suffix* if cut.

    Args:
        text: Text to truncate
        max_chars: Maximum character count
        suffix: Suffix to append when truncating (default: "... [TRUNCATED]")

    Returns:
        Original text if under limit, or truncated text with suffix

    Examples:
        >>> truncate_text("Hello world", 20)
        'Hello world'
        >>> truncate_text("A" * 100, 10)
        'AAAAAAAAAA... [TRUNCATED]'
    """
    if len(text) > max_chars:
        return text[:max_chars] + suffix
    return text
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def truncate_with_note(
    text: str,
    max_chars: int,
    label: str = "content"
) -> tuple[str, bool]:
    """
    Cap *text* at ``max_chars`` and report whether it was cut.

    Args:
        text: Text to truncate
        max_chars: Maximum character count
        label: Label for the truncation note

    Returns:
        Tuple of (truncated_text, was_truncated)

    Examples:
        >>> text, truncated = truncate_with_note("short", 100)
        >>> truncated
        False
        >>> text, truncated = truncate_with_note("A" * 200, 50, "worker")
        >>> truncated
        True
    """
    if len(text) > max_chars:
        # The note records the original length so callers can tell how
        # much was dropped.
        note = f"\n... [{label} truncated: {len(text)} chars total]"
        return text[:max_chars] + note, True
    return text, False
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
"""Trace management for Parishad runs."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Optional, Iterator
|
|
7
|
+
|
|
8
|
+
from ..roles.base import Trace
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class TraceManager:
|
|
12
|
+
"""
|
|
13
|
+
Manages execution traces for analysis and debugging.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
def __init__(self, trace_dir: str | Path):
|
|
17
|
+
"""
|
|
18
|
+
Initialize TraceManager.
|
|
19
|
+
|
|
20
|
+
Args:
|
|
21
|
+
trace_dir: Directory containing trace files
|
|
22
|
+
"""
|
|
23
|
+
self.trace_dir = Path(trace_dir)
|
|
24
|
+
|
|
25
|
+
def list_traces(self) -> list[str]:
|
|
26
|
+
"""List all trace IDs in the directory."""
|
|
27
|
+
traces = []
|
|
28
|
+
for path in self.trace_dir.glob("trace_*.json"):
|
|
29
|
+
trace_id = path.stem.replace("trace_", "")
|
|
30
|
+
traces.append(trace_id)
|
|
31
|
+
return sorted(traces)
|
|
32
|
+
|
|
33
|
+
def load_trace(self, trace_id: str) -> dict:
|
|
34
|
+
"""Load a trace by ID."""
|
|
35
|
+
path = self.trace_dir / f"trace_{trace_id}.json"
|
|
36
|
+
if not path.exists():
|
|
37
|
+
raise FileNotFoundError(f"Trace not found: {trace_id}")
|
|
38
|
+
|
|
39
|
+
with open(path) as f:
|
|
40
|
+
return json.load(f)
|
|
41
|
+
|
|
42
|
+
def iter_traces(self) -> Iterator[dict]:
|
|
43
|
+
"""Iterate over all traces."""
|
|
44
|
+
for trace_id in self.list_traces():
|
|
45
|
+
yield self.load_trace(trace_id)
|
|
46
|
+
|
|
47
|
+
def get_summary(self) -> dict:
|
|
48
|
+
"""Get summary statistics for all traces."""
|
|
49
|
+
traces = list(self.iter_traces())
|
|
50
|
+
|
|
51
|
+
if not traces:
|
|
52
|
+
return {"count": 0}
|
|
53
|
+
|
|
54
|
+
total_tokens = sum(t.get("total_tokens", 0) for t in traces)
|
|
55
|
+
total_latency = sum(t.get("total_latency_ms", 0) for t in traces)
|
|
56
|
+
success_count = sum(1 for t in traces if t.get("success", False))
|
|
57
|
+
|
|
58
|
+
return {
|
|
59
|
+
"count": len(traces),
|
|
60
|
+
"success_rate": success_count / len(traces),
|
|
61
|
+
"avg_tokens": total_tokens / len(traces),
|
|
62
|
+
"avg_latency_ms": total_latency / len(traces),
|
|
63
|
+
"total_tokens": total_tokens,
|
|
64
|
+
"total_latency_ms": total_latency,
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
def filter_by_config(self, config: str) -> list[dict]:
|
|
68
|
+
"""Filter traces by configuration."""
|
|
69
|
+
return [t for t in self.iter_traces() if t.get("config") == config]
|
|
70
|
+
|
|
71
|
+
def filter_by_date(
|
|
72
|
+
self,
|
|
73
|
+
start_date: Optional[datetime] = None,
|
|
74
|
+
end_date: Optional[datetime] = None
|
|
75
|
+
) -> list[dict]:
|
|
76
|
+
"""Filter traces by date range."""
|
|
77
|
+
results = []
|
|
78
|
+
for trace in self.iter_traces():
|
|
79
|
+
timestamp_str = trace.get("timestamp", "")
|
|
80
|
+
try:
|
|
81
|
+
timestamp = datetime.fromisoformat(timestamp_str)
|
|
82
|
+
except ValueError:
|
|
83
|
+
continue
|
|
84
|
+
|
|
85
|
+
if start_date and timestamp < start_date:
|
|
86
|
+
continue
|
|
87
|
+
if end_date and timestamp > end_date:
|
|
88
|
+
continue
|
|
89
|
+
|
|
90
|
+
results.append(trace)
|
|
91
|
+
|
|
92
|
+
return results
|
|
93
|
+
|
|
94
|
+
def export_metrics(self, output_path: str | Path) -> None:
|
|
95
|
+
"""
|
|
96
|
+
Export trace metrics to a CSV file.
|
|
97
|
+
|
|
98
|
+
Useful for analysis and plotting.
|
|
99
|
+
"""
|
|
100
|
+
import csv
|
|
101
|
+
|
|
102
|
+
with open(output_path, "w", newline="") as f:
|
|
103
|
+
writer = csv.writer(f)
|
|
104
|
+
|
|
105
|
+
# Header
|
|
106
|
+
writer.writerow([
|
|
107
|
+
"query_id",
|
|
108
|
+
"config",
|
|
109
|
+
"timestamp",
|
|
110
|
+
"total_tokens",
|
|
111
|
+
"total_latency_ms",
|
|
112
|
+
"budget_remaining",
|
|
113
|
+
"retries",
|
|
114
|
+
"success",
|
|
115
|
+
"num_roles",
|
|
116
|
+
"final_confidence"
|
|
117
|
+
])
|
|
118
|
+
|
|
119
|
+
# Data
|
|
120
|
+
for trace in self.iter_traces():
|
|
121
|
+
final_answer = trace.get("final_answer", {})
|
|
122
|
+
writer.writerow([
|
|
123
|
+
trace.get("query_id", ""),
|
|
124
|
+
trace.get("config", ""),
|
|
125
|
+
trace.get("timestamp", ""),
|
|
126
|
+
trace.get("total_tokens", 0),
|
|
127
|
+
trace.get("total_latency_ms", 0),
|
|
128
|
+
trace.get("budget_remaining", 0),
|
|
129
|
+
trace.get("retries", 0),
|
|
130
|
+
trace.get("success", False),
|
|
131
|
+
len(trace.get("roles", [])),
|
|
132
|
+
final_answer.get("confidence", 0)
|
|
133
|
+
])
|
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: parishad
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness
|
|
5
|
+
Project-URL: Homepage, https://github.com/parishad-council/parishad
|
|
6
|
+
Project-URL: Documentation, https://github.com/parishad-council/parishad#readme
|
|
7
|
+
Project-URL: Repository, https://github.com/parishad-council/parishad
|
|
8
|
+
Project-URL: Issues, https://github.com/parishad-council/parishad/issues
|
|
9
|
+
Author: Parishad Team
|
|
10
|
+
License-Expression: MIT
|
|
11
|
+
License-File: LICENSE
|
|
12
|
+
Keywords: code-generation,council,llm,local-llm,multi-agent,reasoning
|
|
13
|
+
Classifier: Development Status :: 3 - Alpha
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: Intended Audience :: Science/Research
|
|
16
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
21
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
22
|
+
Requires-Python: >=3.10
|
|
23
|
+
Requires-Dist: click>=8.0.0
|
|
24
|
+
Requires-Dist: httpx>=0.25.0
|
|
25
|
+
Requires-Dist: jsonschema>=4.0.0
|
|
26
|
+
Requires-Dist: numpy<2.0
|
|
27
|
+
Requires-Dist: openai>=1.0.0
|
|
28
|
+
Requires-Dist: pydantic-settings>=2.0.0
|
|
29
|
+
Requires-Dist: pydantic>=2.0.0
|
|
30
|
+
Requires-Dist: pyyaml>=6.0
|
|
31
|
+
Requires-Dist: rich>=13.0.0
|
|
32
|
+
Requires-Dist: tiktoken>=0.5.0
|
|
33
|
+
Provides-Extra: all
|
|
34
|
+
Requires-Dist: accelerate>=0.25.0; extra == 'all'
|
|
35
|
+
Requires-Dist: black>=23.0.0; extra == 'all'
|
|
36
|
+
Requires-Dist: chromadb>=0.5.0; extra == 'all'
|
|
37
|
+
Requires-Dist: datasets>=2.14.0; extra == 'all'
|
|
38
|
+
Requires-Dist: faiss-cpu>=1.7.0; extra == 'all'
|
|
39
|
+
Requires-Dist: llama-cpp-python>=0.2.0; extra == 'all'
|
|
40
|
+
Requires-Dist: mypy>=1.0.0; extra == 'all'
|
|
41
|
+
Requires-Dist: pandas>=2.0.0; extra == 'all'
|
|
42
|
+
Requires-Dist: pre-commit>=3.0.0; extra == 'all'
|
|
43
|
+
Requires-Dist: pypdf>=3.0.0; extra == 'all'
|
|
44
|
+
Requires-Dist: ruff>=0.1.0; extra == 'all'
|
|
45
|
+
Requires-Dist: sentence-transformers>=2.2.0; extra == 'all'
|
|
46
|
+
Requires-Dist: torch>=2.0.0; extra == 'all'
|
|
47
|
+
Requires-Dist: tqdm>=4.66.0; extra == 'all'
|
|
48
|
+
Requires-Dist: transformers>=4.35.0; extra == 'all'
|
|
49
|
+
Provides-Extra: benchmark
|
|
50
|
+
Requires-Dist: datasets>=2.14.0; extra == 'benchmark'
|
|
51
|
+
Requires-Dist: pandas>=2.0.0; extra == 'benchmark'
|
|
52
|
+
Requires-Dist: tqdm>=4.66.0; extra == 'benchmark'
|
|
53
|
+
Provides-Extra: cuda
|
|
54
|
+
Requires-Dist: accelerate>=0.25.0; extra == 'cuda'
|
|
55
|
+
Requires-Dist: auto-gptq>=0.6.0; extra == 'cuda'
|
|
56
|
+
Requires-Dist: autoawq>=0.1.0; extra == 'cuda'
|
|
57
|
+
Requires-Dist: bitsandbytes>=0.41.0; extra == 'cuda'
|
|
58
|
+
Requires-Dist: optimum>=1.16.0; extra == 'cuda'
|
|
59
|
+
Requires-Dist: protobuf>=4.0.0; extra == 'cuda'
|
|
60
|
+
Requires-Dist: torch>=2.0.0; extra == 'cuda'
|
|
61
|
+
Requires-Dist: transformers>=4.35.0; extra == 'cuda'
|
|
62
|
+
Provides-Extra: dev
|
|
63
|
+
Requires-Dist: black>=23.0.0; extra == 'dev'
|
|
64
|
+
Requires-Dist: mypy>=1.0.0; extra == 'dev'
|
|
65
|
+
Requires-Dist: pre-commit>=3.0.0; extra == 'dev'
|
|
66
|
+
Requires-Dist: ruff>=0.1.0; extra == 'dev'
|
|
67
|
+
Provides-Extra: distributed
|
|
68
|
+
Requires-Dist: grpcio-tools>=1.50.0; extra == 'distributed'
|
|
69
|
+
Requires-Dist: grpcio>=1.50.0; extra == 'distributed'
|
|
70
|
+
Provides-Extra: local
|
|
71
|
+
Requires-Dist: accelerate>=0.25.0; extra == 'local'
|
|
72
|
+
Requires-Dist: llama-cpp-python>=0.2.0; extra == 'local'
|
|
73
|
+
Requires-Dist: torch>=2.0.0; extra == 'local'
|
|
74
|
+
Requires-Dist: transformers>=4.35.0; extra == 'local'
|
|
75
|
+
Provides-Extra: mlx
|
|
76
|
+
Requires-Dist: mlx-lm>=0.1.0; extra == 'mlx'
|
|
77
|
+
Provides-Extra: perception
|
|
78
|
+
Requires-Dist: markitdown>=0.0.1; extra == 'perception'
|
|
79
|
+
Provides-Extra: retrieval
|
|
80
|
+
Requires-Dist: chromadb>=0.5.0; extra == 'retrieval'
|
|
81
|
+
Requires-Dist: faiss-cpu>=1.7.0; extra == 'retrieval'
|
|
82
|
+
Requires-Dist: pypdf>=3.0.0; extra == 'retrieval'
|
|
83
|
+
Requires-Dist: sentence-transformers>=2.2.0; extra == 'retrieval'
|
|
84
|
+
Description-Content-Type: text/markdown
|
|
85
|
+
|
|
86
|
+
<div align="center">
|
|
87
|
+
|
|
88
|
+

|
|
89
|
+
|
|
90
|
+
**A cost-aware, local-first council of heterogeneous LLMs for reliable reasoning, coding, and factual correctness.**
|
|
91
|
+
|
|
92
|
+
[](https://pypi.org/project/parishad/)
|
|
93
|
+
[](https://pypi.org/project/parishad/)
|
|
94
|
+
[](LICENSE)
|
|
95
|
+
[](https://github.com/psf/black)
|
|
96
|
+
[](http://mypy-lang.org/)
|
|
97
|
+
|
|
98
|
+
</div>
|
|
99
|
+
|
|
100
|
+
---
|
|
101
|
+
|
|
102
|
+
## 📖 Overview
|
|
103
|
+
|
|
104
|
+
**Parishad** (Hindi: *Council*) orchestrates multiple local language models into a structured reasoning pipeline. Instead of relying on a single massive model, Parishad coordinates specialized, smaller models (2B-34B parameters) into functional roles based on traditional Indian administrative titles.
|
|
105
|
+
|
|
106
|
+
This approach achieves **higher reliability** and **lower latency** on consumer hardware (Mac Silicon, NVIDIA GPUs) by assigning tasks to the most appropriate model slot (Small/Medium/Big) tailored to your compute budget.
|
|
107
|
+
|
|
108
|
+
## ✨ Key Features
|
|
109
|
+
|
|
110
|
+
- **🏛️ Heterogeneous Model Council**: Seamlessly orchestrate varying model sizes (e.g., Phi-3 for syntax, Llama-3 for reasoning, Mixtral for judgment).
|
|
111
|
+
- **🖥️ Beautiful TUI Dashboard**: Real-time interactive terminal interface with visual role tracking, progress bars, and emoji indicators.
|
|
112
|
+
- **👁️ Vision Capabilities**: Integrated `PerceptionTool` allows the council to "see" and analyze images via local VLMs.
|
|
113
|
+
- **💰 Cost-Aware Execution**: Define token budgets per query; the council adapts its strategy to stay within limits.
|
|
114
|
+
- **🛡️ Structured Verification**: The `Prerak` (Checker) role actively challenges hallucinations using deterministic tools and cross-examination.
|
|
115
|
+
- **🔌 Local-First Backends**: Native support for **Ollama**, **Llama.cpp**, **MLX**, and **Transformers**.
|
|
116
|
+
|
|
117
|
+
## 🏛️ The Three Sabhas (Council Configurations)
|
|
118
|
+
|
|
119
|
+
Parishad offers three distinct council sizes to balance speed, cost, and depth:
|
|
120
|
+
|
|
121
|
+
### 1. Laghu Sabha (Fast Council)
|
|
122
|
+
* **Roles**: 5 (Darbari, Majumdar, Sainik, Prerak, Raja)
|
|
123
|
+
* **Use Case**: Quick queries, simple code generation, factual checks.
|
|
124
|
+
* **Models**: Optimized for Small/Mid models.
|
|
125
|
+
|
|
126
|
+
### 2. Madhya Sabha (Core Council)
|
|
127
|
+
* **Roles**: 8 (Adds **Sar Senapati** [Coordinator], **Sacheev** [Secretary], **Dandadhyaksha** [Magistrate])
|
|
128
|
+
* **Use Case**: Balanced reasoning for standard development tasks.
|
|
129
|
+
* **Models**: Uses Mid/Big models for deeper analysis.
|
|
130
|
+
|
|
131
|
+
### 3. Maha Sabha (Extended Council)
|
|
132
|
+
* **Roles**: 10 (Adds **Pantapradhan** [Prime Minister], **Vidushak** [Lateral Thinker])
|
|
133
|
+
* **Use Case**: Complex system design, creative problem solving, and strategic planning.
|
|
134
|
+
* **Models**: Full spectrum orchestration (Small + Mid + Big).
|
|
135
|
+
|
|
136
|
+
## 🏗️ Architecture
|
|
137
|
+
|
|
138
|
+
Parishad organizes models into a directed graph of roles:
|
|
139
|
+
|
|
140
|
+
```text
|
|
141
|
+
┌───────────┐ ┌───────────┐ ┌───────────┐
|
|
142
|
+
│ DARBARI │ ──► │ MAJUMDAR │ ──► │ SAINIK │
|
|
143
|
+
│ (Refiner) │ │ (Planner) │ │ (Worker) │
|
|
144
|
+
└───────────┘ └───────────┘ └───────────┘
|
|
145
|
+
│
|
|
146
|
+
▼
|
|
147
|
+
┌───────────┐ ┌───────────┐ ┌───────────┐
|
|
148
|
+
│ RAJA │ ◄── │ PRERAK │ ◄── │ SYSTEM & │
|
|
149
|
+
│ (Judge) │ │ (Checker) │ │ TOOLS │
|
|
150
|
+
└───────────┘ └───────────┘ └───────────┘
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
- **Darbari**: Normalizes vague user queries into structured task specifications.
|
|
154
|
+
- **Majumdar**: Decomposes complex tasks into stepwise execution plans.
|
|
155
|
+
- **Sainik**: Executes steps, writing code or generating content using tools.
|
|
156
|
+
- **Prerak**: Validates output against facts, schema, or safety policies.
|
|
157
|
+
- **Raja**: Synthesizes the final answer and assigns a confidence score.
|
|
158
|
+
|
|
159
|
+
## 📦 Installation
|
|
160
|
+
|
|
161
|
+
### Standard Install
|
|
162
|
+
```bash
|
|
163
|
+
pip install parishad
|
|
164
|
+
```
|
|
165
|
+
|
|
166
|
+
### Optimized Install Options
|
|
167
|
+
Choose the backend that matches your hardware:
|
|
168
|
+
|
|
169
|
+
```bash
|
|
170
|
+
# Apple Silicon (M1/M2/M3) - Recommended
|
|
171
|
+
pip install "parishad[mlx]"
|
|
172
|
+
|
|
173
|
+
# NVIDIA GPU / Local Parsing
|
|
174
|
+
pip install "parishad[local,cuda]"
|
|
175
|
+
|
|
176
|
+
# With Retrieval (RAG) capabilities
|
|
177
|
+
pip install "parishad[retrieval]"
|
|
178
|
+
|
|
179
|
+
# For Developers (Linting, Testing)
|
|
180
|
+
pip install "parishad[dev]"
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
## 🚀 Quick Start
|
|
184
|
+
|
|
185
|
+
### 1. Launch & Setup
|
|
186
|
+
Run the main command to initialize the council. This automatically handles permissions, scans for models (Ollama/HF), and launches the TUI.
|
|
187
|
+
|
|
188
|
+
```bash
|
|
189
|
+
parishad
|
|
190
|
+
```
|
|
191
|
+
|
|
192
|
+
**Pro Tip:** Use `/sabha` to switch between council sizes (e.g., `laghu` for speed, `maha` for deep reasoning) and `/scan` to find new local models.
|
|
193
|
+
|
|
194
|
+
### 2. CLI Direct Execution
|
|
195
|
+
Run a specific task without entering the interactive shell.
|
|
196
|
+
|
|
197
|
+
```bash
|
|
198
|
+
parishad run --sabha madhyam "Analyze the 'data.csv' file and plot trends"
|
|
199
|
+
```
|
|
200
|
+
|
|
201
|
+
### 3. Python API
|
|
202
|
+
Integrate Parishad into your own applications.
|
|
203
|
+
|
|
204
|
+
```python
|
|
205
|
+
from parishad import Parishad
|
|
206
|
+
|
|
207
|
+
# Initialize the council
|
|
208
|
+
council = Parishad(config="core")
|
|
209
|
+
|
|
210
|
+
# Execute a complex query
|
|
211
|
+
result = council.run("Design a scalable REST API architecture for a bookstore.")
|
|
212
|
+
|
|
213
|
+
print(f"Final Answer: {result.final_answer}")
|
|
214
|
+
print(f"Confidence: {result.confidence_score}/10")
|
|
215
|
+
# Full execution trace available in result.trace
|
|
216
|
+
```
|
|
217
|
+
|
|
218
|
+
## 🎮 Interactive Features
|
|
219
|
+
|
|
220
|
+
Unlock the full power of the council with these TUI capabilities:
|
|
221
|
+
|
|
222
|
+
### ⚡ Slash Commands
|
|
223
|
+
Control the session without leaving the chat interface:
|
|
224
|
+
- **`/sabha [mode]`**: Switch council configuration instantly (e.g., `/sabha laghu` for speed, `/sabha maha` for power).
|
|
225
|
+
- **`/scan`**: Re-scan your system for newly downloaded models.
|
|
226
|
+
- **`/roles`**: View currently active roles, their models, and slot assignments.
|
|
227
|
+
- **`/history`**: View past queries and load previous traces.
|
|
228
|
+
- **`/clear`**: Reset the conversation context.
|
|
229
|
+
|
|
230
|
+
### 📎 Context Awareness (@ Mentions)
|
|
231
|
+
Parishad allows you to "chat with your files". Directly reference local files in your query:
|
|
232
|
+
- **`@path/to/file.py`**: Reads the file and adds it to the context.
|
|
233
|
+
- **`@src/`**: Scans the directory structure.
|
|
234
|
+
|
|
235
|
+
Example:
|
|
236
|
+
> *"Analyze @src/main.py and refactor the ErrorHandler class."*
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
## 🤝 Contributing
|
|
240
|
+
|
|
241
|
+
Contributions are welcome! Please follow these steps:
|
|
242
|
+
|
|
243
|
+
1. Fork the repository.
|
|
244
|
+
2. Install dev dependencies: `pip install -e ".[dev]"`
|
|
245
|
+
3. Ensure code style compliance: `black . && ruff check .`
|
|
246
|
+
4. Submit a Pull Request.
|
|
247
|
+
|
|
248
|
+
## 📄 License
|
|
249
|
+
|
|
250
|
+
This project is licensed under the [MIT License](LICENSE).
|
|
251
|
+
|
|
252
|
+
---
|
|
253
|
+
|
|
254
|
+
<div align="center">
|
|
255
|
+
<sub>Built with ❤️ by <a href="https://github.com/ommo007">Om Mohite</a> & <a href="https://github.com/Ashiiish-88">Ashish Prajapati</a>. Trusted by developers for local agentic workflows.</sub>
|
|
256
|
+
</div>
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
parishad/__init__.py,sha256=S-7Gzx7ahrOEsv93_xrRl5zExewYeFyDhKmPQCRm47g,1199
|
|
2
|
+
parishad/__main__.py,sha256=0qrXs91zon0xM17c5HTrkiSBZwiRfMJV2jOo0qP7MJ8,146
|
|
3
|
+
parishad/checker/__init__.py,sha256=m-Vl5emTwJWumsIbLY7g5pslU9m1uTcJNAxnGJPbkJU,540
|
|
4
|
+
parishad/checker/deterministic.py,sha256=reXH8aew_pBGn85MRMQcHRzi6fWU_JaGniimYUqsly0,20477
|
|
5
|
+
parishad/checker/ensemble.py,sha256=ozJwmQFNY4ojcYB3I0o5-3xUk43tdDqHm-jGVgO6Bhg,17506
|
|
6
|
+
parishad/checker/retrieval.py,sha256=jQ5Zf38Y1rcBrhmAOyNs4sTHFwF98Iqugsw1rGLHPks,16974
|
|
7
|
+
parishad/cli/__init__.py,sha256=iI_jctu7iGt7SZMfdkAQ75eFORLjy4soWC9DMzV8WaI,67
|
|
8
|
+
parishad/cli/code.py,sha256=P3sLqIXNLH_9cfxHZU-doPygCg_0KgffWwxDMpDMxdA,118649
|
|
9
|
+
parishad/cli/main.py,sha256=vmCV1O1u5fqWmMIjjzECeMOgUayGoI8n_oCaRgmf_Nw,38267
|
|
10
|
+
parishad/cli/prarambh.py,sha256=4vvRzJHWC6ECPdL4IKv5laZDy4v3ZYDq4vjqu3Hll30,4296
|
|
11
|
+
parishad/cli/sthapana.py,sha256=Sxk-TA-WCKW2CSPBQPhhEgaON60HKVDUaMorFjc-BNQ,13534
|
|
12
|
+
parishad/config/modes.py,sha256=eh4B5ZUUPN6HqYAj5hLudTqkyxBHKhIsx_Q_BvSduGk,4066
|
|
13
|
+
parishad/config/pipeline.core.yaml,sha256=NRi7-xeTvsBPhknT1OUPqURHVXB8uQmEfNTdTdYAhQw,2960
|
|
14
|
+
parishad/config/pipeline.extended.yaml,sha256=gnrN-Tai7oxpFw38WNVptWcb-tS-7gZCT_NBvRjKKaQ,4187
|
|
15
|
+
parishad/config/pipeline.fast.yaml,sha256=VzGFr480bjE96MqYn5Vwr_c6vHlzePGKPV3MSaW5XwY,2050
|
|
16
|
+
parishad/config/user_config.py,sha256=aKgiILWKDc3_OYaZz-u9a4Pm1PP1EULeX-q6KqMbcho,3165
|
|
17
|
+
parishad/data/catalog.py,sha256=hnhyHlAuETzXf1hlP_snXYhjq2nZG9Ab2OGXeMyYlDY,4623
|
|
18
|
+
parishad/data/models.json,sha256=oClNTo90Avt80n4AhImrZWQO2Uhx1RJanuD1q5_s-tY,19388
|
|
19
|
+
parishad/memory/__init__.py,sha256=AJEa0RNcqjep_3mkYniuhK-ZUj1mshCk0rPTrCQrxhY,2687
|
|
20
|
+
parishad/models/__init__.py,sha256=v0CqeJ69qwkUw4nC63VH6QOx4v8Mce1_akb-1VvzTa4,3682
|
|
21
|
+
parishad/models/costs.py,sha256=2ngHV1HsDkuUsTK8QCJGqXlJUWOlVVgoO72K8oJ0ZrA,11393
|
|
22
|
+
parishad/models/downloader.py,sha256=MIILM4je7UUzb56qO4iReTJQtI8SoaN4teeQAjRWxXw,55248
|
|
23
|
+
parishad/models/optimizations.py,sha256=Ww4jUIdZ4-6OPzA-jqktE_J5dfp5OKawErBLwLoE27Q,25789
|
|
24
|
+
parishad/models/profiles.py,sha256=E7gXpzxxX1k8r8cmBBeLc0zQMD8KbSurMfzLNXDyS2A,19504
|
|
25
|
+
parishad/models/reliability.py,sha256=gUKunkPM4GW0akO5RFtIwhGMKYCXAXdhrya7XAQyjzQ,26392
|
|
26
|
+
parishad/models/runner.py,sha256=J6WEpVvBASJwa7GDF3sUPwjWycHcIkTV4Cti7Cx5BEM,21239
|
|
27
|
+
parishad/models/tokenization.py,sha256=mTl0QhLMSlSjjHzZccbvgPhRJJ6D36SrYTNEEL8AlZU,7305
|
|
28
|
+
parishad/models/backends/__init__.py,sha256=6j9TxZRa3ycaHVYEUDMWLFpoZms9mprgxW4dQfFp9vo,6696
|
|
29
|
+
parishad/models/backends/base.py,sha256=iLGkeANbkpQwOBh8zmW0uGORUHSOQkklSzrIUkfW2vE,5278
|
|
30
|
+
parishad/models/backends/huggingface.py,sha256=2NTF_LG1rQawnzAZ6Qnd1k9pJpUtVBHTjpgLySZeMvM,11051
|
|
31
|
+
parishad/models/backends/llama_cpp.py,sha256=FBGzYoBszC4DYYQ_m7EQLUZQ_u6X1GsUuPMWdE2zvko,7902
|
|
32
|
+
parishad/models/backends/mlx_lm.py,sha256=QOFuU4P2NjDjdnZFNG8st6D9VC6nzSLD-g81nQzmxqQ,3901
|
|
33
|
+
parishad/models/backends/ollama.py,sha256=nyk7rxDXn6Cw03S6Z9Cm3SxaZp09m-1qf6JE1TGBeJc,8608
|
|
34
|
+
parishad/models/backends/openai_api.py,sha256=9CmBvhQEcPX0R1RtQnHWVcTdH9wMT6Pu1acF2vy8C2o,6041
|
|
35
|
+
parishad/models/backends/transformers_hf.py,sha256=z-nw18yY62BorAawrand59GgD_0dmnYqibQGcP6sadQ,6619
|
|
36
|
+
parishad/orchestrator/__init__.py,sha256=a52VEdW_98XwD3FseRu_9_sKagFs6z5PtXS_o4j2fbs,413
|
|
37
|
+
parishad/orchestrator/config_loader.py,sha256=wXTz9AWJWeminVWz3YR5jxhne5lCw-JjYvvQGbvOyNg,6975
|
|
38
|
+
parishad/orchestrator/engine.py,sha256=WAKcHqG20tRqt-Hg62iR-TOswmgc0Y0OclSuSlZe0lA,45644
|
|
39
|
+
parishad/orchestrator/exceptions.py,sha256=jLNkoKvhuKny2upGv04L7Dj4SAi0umVRC0RAukX5BBM,391
|
|
40
|
+
parishad/roles/__init__.py,sha256=OF8Zb-yNP9nX0gS9gnYkt0QoG_KyHYwtw_OiWplFnxc,1171
|
|
41
|
+
parishad/roles/base.py,sha256=u5nipc3S6Bv86eAYbntYkFj3AszzwmZ74BVukMdBMjo,24866
|
|
42
|
+
parishad/roles/dandadhyaksha.py,sha256=c3X0IYVGbzlBtsyskxIIU8PQgKZBNOpKPtj9tS0R8qE,5040
|
|
43
|
+
parishad/roles/darbari.py,sha256=k3V-tc5BwVLs_U9EpEphSHI525ZWJRB9pc0eCuXws3w,9283
|
|
44
|
+
parishad/roles/majumdar.py,sha256=-suvC6Rly3lGjn5_XraYc-1eOCYyb7HeSVAkq6AX_NY,9763
|
|
45
|
+
parishad/roles/pantapradhan.py,sha256=Hko47GQdbb561EHhFpWXrIIHw4bCHz8vu8sUtQmlicA,5025
|
|
46
|
+
parishad/roles/prerak.py,sha256=cpXPqibt1CT_Sz1q5F_m9mZKZVAGixPsIYtg3Vnwmo8,11879
|
|
47
|
+
parishad/roles/raja.py,sha256=iliR3v4YHnwSPPU1_GZUkNiTKgcoZ9EoiWJng_pfdBU,12096
|
|
48
|
+
parishad/roles/sacheev.py,sha256=terhWEEB_Zhti2Wb3LAK2aOYk8sFE72Q9cy7CfTor6I,6594
|
|
49
|
+
parishad/roles/sainik.py,sha256=a0EDIFEX4-KF3k0KQEKIhm2MYPXjcjjshhwY_tgJcp4,15403
|
|
50
|
+
parishad/roles/sar_senapati.py,sha256=XT7pPTYoJorusay1_v2Sk5-GgZX2auwRmJegLOvJGWA,5413
|
|
51
|
+
parishad/roles/vidushak.py,sha256=f6lmtZrIbAO97RMG2NQYSm14JbqguBA7ukOT-n6i0cM,2018
|
|
52
|
+
parishad/tools/__init__.py,sha256=Ni_RlR4AnOFIBivbM-sfpRdZI0Tpu9MXkbs3n4Ngx8I,279
|
|
53
|
+
parishad/tools/base.py,sha256=3uwWdSj1RRvbGCJpdBMHQ0LJx8fnE-d8uHQmDzkyqWk,1676
|
|
54
|
+
parishad/tools/fs.py,sha256=7OiCm2KNnq6hnZCMoMEbompA_TUYCbu1kg-8ZgwhXZ0,4427
|
|
55
|
+
parishad/tools/perception.py,sha256=Q0snYwzWCZbSqi8qcnDhxoZGx-n74cYRCJyS2ajt4wQ,3089
|
|
56
|
+
parishad/tools/retrieval.py,sha256=WGoji7n5pKgww3iBNN68UelSCOePxLIf33hQXu3RGlA,2472
|
|
57
|
+
parishad/tools/shell.py,sha256=RFESJun8TmaB97yaWl9LpsTrr-flR0EoqTs1wast7Xc,3258
|
|
58
|
+
parishad/utils/__init__.py,sha256=vRipAnXqboRXUMY2RvUuYVemwKxUDxgovjyyazvTYbo,218
|
|
59
|
+
parishad/utils/hardware.py,sha256=UngDKW8g-8ztEnCKiYTRX5AZBhoopts_n7nH_hSk3fU,3475
|
|
60
|
+
parishad/utils/logging.py,sha256=TT16YB6cOhBPyo9a6dZRww3SjStrZKihdtq9hwqBXJo,2144
|
|
61
|
+
parishad/utils/scanner.py,sha256=8wxaNgH3i_T4AdyBuLr9L4KcQ_AORguA6xvnOIyem8k,5841
|
|
62
|
+
parishad/utils/text.py,sha256=S_3Ox4T3C87XfyXdR2b3JMatpCiOozaqPUbVic7OIFM,1617
|
|
63
|
+
parishad/utils/tracing.py,sha256=x35BmMO9M83dVCy73kYqkOFE4zKMrUFe_RuV8qIWJaM,4304
|
|
64
|
+
parishad-0.1.0.dist-info/METADATA,sha256=ROTQw7pjwqJCYUzxl27LOLxv4RBB3GZtEzxh75dlFCo,11021
|
|
65
|
+
parishad-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
66
|
+
parishad-0.1.0.dist-info/entry_points.txt,sha256=cCF4Bg5sLxlLMJhnOnWNua3XYzAGlL5ri-55y0fWPek,51
|
|
67
|
+
parishad-0.1.0.dist-info/licenses/LICENSE,sha256=Xow-fDHX9pzrvBkPHImvQa-Uc1g6BDbz9IE4jPfB6D0,1073
|
|
68
|
+
parishad-0.1.0.dist-info/RECORD,,
|