parishad-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. parishad/__init__.py +70 -0
  2. parishad/__main__.py +10 -0
  3. parishad/checker/__init__.py +25 -0
  4. parishad/checker/deterministic.py +644 -0
  5. parishad/checker/ensemble.py +496 -0
  6. parishad/checker/retrieval.py +546 -0
  7. parishad/cli/__init__.py +6 -0
  8. parishad/cli/code.py +3254 -0
  9. parishad/cli/main.py +1158 -0
  10. parishad/cli/prarambh.py +99 -0
  11. parishad/cli/sthapana.py +368 -0
  12. parishad/config/modes.py +139 -0
  13. parishad/config/pipeline.core.yaml +128 -0
  14. parishad/config/pipeline.extended.yaml +172 -0
  15. parishad/config/pipeline.fast.yaml +89 -0
  16. parishad/config/user_config.py +115 -0
  17. parishad/data/catalog.py +118 -0
  18. parishad/data/models.json +108 -0
  19. parishad/memory/__init__.py +79 -0
  20. parishad/models/__init__.py +181 -0
  21. parishad/models/backends/__init__.py +247 -0
  22. parishad/models/backends/base.py +211 -0
  23. parishad/models/backends/huggingface.py +318 -0
  24. parishad/models/backends/llama_cpp.py +239 -0
  25. parishad/models/backends/mlx_lm.py +141 -0
  26. parishad/models/backends/ollama.py +253 -0
  27. parishad/models/backends/openai_api.py +193 -0
  28. parishad/models/backends/transformers_hf.py +198 -0
  29. parishad/models/costs.py +385 -0
  30. parishad/models/downloader.py +1557 -0
  31. parishad/models/optimizations.py +871 -0
  32. parishad/models/profiles.py +610 -0
  33. parishad/models/reliability.py +876 -0
  34. parishad/models/runner.py +651 -0
  35. parishad/models/tokenization.py +287 -0
  36. parishad/orchestrator/__init__.py +24 -0
  37. parishad/orchestrator/config_loader.py +210 -0
  38. parishad/orchestrator/engine.py +1113 -0
  39. parishad/orchestrator/exceptions.py +14 -0
  40. parishad/roles/__init__.py +71 -0
  41. parishad/roles/base.py +712 -0
  42. parishad/roles/dandadhyaksha.py +163 -0
  43. parishad/roles/darbari.py +246 -0
  44. parishad/roles/majumdar.py +274 -0
  45. parishad/roles/pantapradhan.py +150 -0
  46. parishad/roles/prerak.py +357 -0
  47. parishad/roles/raja.py +345 -0
  48. parishad/roles/sacheev.py +203 -0
  49. parishad/roles/sainik.py +427 -0
  50. parishad/roles/sar_senapati.py +164 -0
  51. parishad/roles/vidushak.py +69 -0
  52. parishad/tools/__init__.py +7 -0
  53. parishad/tools/base.py +57 -0
  54. parishad/tools/fs.py +110 -0
  55. parishad/tools/perception.py +96 -0
  56. parishad/tools/retrieval.py +74 -0
  57. parishad/tools/shell.py +103 -0
  58. parishad/utils/__init__.py +7 -0
  59. parishad/utils/hardware.py +122 -0
  60. parishad/utils/logging.py +79 -0
  61. parishad/utils/scanner.py +164 -0
  62. parishad/utils/text.py +61 -0
  63. parishad/utils/tracing.py +133 -0
  64. parishad-0.1.0.dist-info/METADATA +256 -0
  65. parishad-0.1.0.dist-info/RECORD +68 -0
  66. parishad-0.1.0.dist-info/WHEEL +4 -0
  67. parishad-0.1.0.dist-info/entry_points.txt +2 -0
  68. parishad-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,99 @@
+ """
+ Parishad Prarambh: The Launch Command.
+ Starts the council chat interface using the saved configuration.
+ """
+ import sys
+ from pathlib import Path
+ from rich.console import Console
+ from parishad import Parishad
+
+ console = Console()
+ CONFIG_PATH = Path("models.yaml")
+
+ def main():
+     console.print("[bold green]Parishad Prarambh[/bold green] - Starting the Council...")
+
+     if not CONFIG_PATH.exists():
+         console.print("[red]Error: Configuration file 'models.yaml' not found.[/red]")
+         console.print("Please run [bold cyan]parishad sthapana[/bold cyan] first to set up your council.")
+         sys.exit(1)
+
+     try:
+         # Initialize Parishad with the saved model config.
+         # Note: the Sabha chosen during setup is not yet stored in models.yaml,
+         # so the pipeline defaults to 'core' here; supporting 'extended' would
+         # require persisting that choice alongside the model slots.
+         council = Parishad(
+             config="core",
+             model_config_path=str(CONFIG_PATH),
+             trace_dir="traces"
+         )
+
+         console.print("[dim]Council assembled. Ready for queries.[/dim]")
+         console.print("Type 'exit' or 'quit' to leave.\n")
+
+         while True:
+             try:
+                 query = console.input("[bold cyan]You > [/bold cyan]")
+                 if query.lower() in ("exit", "quit"):
+                     break
+                 if not query.strip():
+                     continue
+
+                 console.print("\n[dim]The Council is deliberating...[/dim]")
+
+                 trace = council.run(query)
+
+                 if trace.final_answer:
+                     console.print("\n[bold green]Council Verdict:[/bold green]")
+                     # Parse the output if it is JSON (PRP returns JSON string)
+                     raw_answer = trace.final_answer.final_answer
+                     import json
+                     import re
+
+                     # Clean markdown code blocks
+                     cleaned = re.sub(r'^```json\s*', '', raw_answer, flags=re.MULTILINE)
+                     cleaned = re.sub(r'^```\s*', '', cleaned, flags=re.MULTILINE)
+                     cleaned = re.sub(r'\s*```$', '', cleaned, flags=re.MULTILINE)
+
+                     try:
+                         parsed = json.loads(cleaned)
+                     except json.JSONDecodeError:
+                         # Fallback for trailing commas or python-style dicts
+                         try:
+                             import ast
+                             parsed = ast.literal_eval(cleaned)
+                         except (ValueError, SyntaxError):
+                             console.print(raw_answer)
+                             continue
+
+                     if isinstance(parsed, dict) and ("final_answer" in parsed or "final_answer" in str(parsed)):
+                         # Handle potential key mismatch or direct access
+                         answer = parsed.get("final_answer")
+                         if answer:
+                             console.print(answer)
+
+                             # Also print code block if present
+                             if parsed.get("answer_type") == "code" and "code_block" in parsed:
+                                 from rich.syntax import Syntax
+                                 console.print(Syntax(parsed["code_block"], "python", theme="monokai", line_numbers=True))
+                     else:
+                         console.print(raw_answer)
+                 else:
+                     console.print("\n[red]The Council could not reach a verdict.[/red]")
+
+                 console.print(f"\n[dim]Tokens: {trace.total_tokens} | Latency: {trace.total_latency_ms}ms[/dim]\n")
+
+             except KeyboardInterrupt:
+                 break
+             except Exception as e:
+                 console.print(f"\n[red]Error:[/red] {e}")
+
+     except Exception as e:
+         console.print(f"[red]Failed to initialize council:[/red] {e}")
+         sys.exit(1)
+
+     console.print("\n[bold]Council Adjourned.[/bold]")
+
+ if __name__ == "__main__":
+     main()
@@ -0,0 +1,368 @@
+ """
+ Parishad Sthapana (Enhanced): The Council Setup Wizard v3.
+ Features: Dashboard, Local-First Auto-Config, Smart Allocator, Claude-Code Tier TUI.
+ """
+ import sys
+ import time
+ import yaml
+ from pathlib import Path
+ from typing import List, Dict, Optional, Tuple
+
+ import rich
+ from rich.console import Console, Group
+ from rich.prompt import Prompt, Confirm
+ from rich.panel import Panel
+ from rich.table import Table
+ from rich.progress import (
+     Progress, SpinnerColumn, BarColumn, TextColumn,
+     DownloadColumn, TransferSpeedColumn, TimeRemainingColumn
+ )
+ from rich import box
+ from rich.layout import Layout
+ from rich.text import Text
+ from rich.align import Align
+
+ from ..utils.hardware import get_hardware_stats, HardwareStats
+ from ..data.catalog import SABHAS, MODELS, ModelEntry, SabhaConfig
+ from ..models.downloader import ModelManager, DownloadProgress, ModelInfo
+ from ..cli.prarambh import main as run_prarambh
+
+ console = Console()
+ DEFAULT_CONFIG_PATH = Path("models.yaml")
+
+ # --- VISUALS ---
+
+ def print_dashboard(stats: HardwareStats, manager: ModelManager):
+     """Print the "Control Center" dashboard."""
+     console.clear()
+
+     # 1. Hardware Panel
+     hw_table = Table.grid(padding=(0, 2), expand=True)
+     hw_table.add_column(style="cyan", justify="left", ratio=1)
+     hw_table.add_column(style="white", justify="right", ratio=2)
+     hw_table.add_row("OS/Arch", f"{stats.os} ({stats.arch})")
+     hw_table.add_row("RAM", f"{stats.ram_total_gb} GB")
+     gpu = stats.gpu_name or "None"
+     if stats.gpu_vram_gb: gpu += f" ({stats.gpu_vram_gb} GB)"
+     hw_table.add_row("GPU", gpu)
+
+     tier_style = {"entry": "yellow", "mid": "blue", "high": "green"}
+     tier_txt = f"[{tier_style[stats.tier]}] {stats.tier.upper()} TIER [/]"
+
+     # 2. Local Library Panel
+     local_models = manager.list_models()
+     lib_table = Table.grid(padding=(0, 2), expand=True)
+     lib_table.add_column(style="magenta", justify="left", ratio=1)
+     lib_table.add_column(style="white", justify="right", ratio=2)
+     lib_table.add_row("Installed Models", str(len(local_models)))
+
+     sources = set(m.source.value for m in local_models)
+     source_txt = ", ".join(sources) if sources else "[dim]None[/dim]"
+     lib_table.add_row("Sources", source_txt)
+
+     # Grid Layout
+     grid = Table.grid(expand=True, padding=2)
+     grid.add_column(ratio=1)
+     grid.add_column(ratio=1)
+
+     p_hw = Panel(
+         Group(
+             Align.center(tier_txt),
+             hw_table
+         ),
+         title="[bold]System Hardware[/bold]",
+         border_style="cyan",
+         padding=(1, 2)
+     )
+
+     p_lib = Panel(
+         lib_table,
+         title="[bold]Local Library[/bold]",
+         border_style="magenta",
+         padding=(1, 2)
+     )
+
+     grid.add_row(p_hw, p_lib)
+
+     console.print(Panel(
+         Align.center("[bold magenta]PARISHAD STHAPANA[/bold magenta] [dim]v3[/dim]"),
+         box=box.DOUBLE_EDGE,
+         border_style="magenta"
+     ))
+     console.print(grid)
+
+     if stats.is_apple_silicon:
+         console.print(Align.center("[dim]⚡ Apple Silicon Optimization Active (MLX)[/dim]"))
+     console.print()
+
+ # --- LOGIC ---
+
+ def suggest_configuration_from_local(local_models: List[ModelInfo], mode: str, tier: str) -> Optional[Dict[str, ModelEntry]]:
+     """Try to auto-allocate local models to slots."""
+     if not local_models:
+         return None
+
+     # Heuristic: Sort by size (bytes)
+     # This is rough but generally bigger file = smarter model
+     sorted_models = sorted(local_models, key=lambda m: m.size_bytes, reverse=True)
+
+     # Create pseudo catalog entries for local models so rest of logic works
+     candidates = []
+     for m in sorted_models:
+         # Infer RAM usage roughly (size in GB + overhead)
+         size_gb = m.size_bytes / (1024**3)
+         min_ram = int(size_gb * 1.2)
+
+         # Determine backend from format
+         backend = "transformers"
+         if m.format.value == "gguf":
+             backend = "llama_cpp"
+         elif m.format.value == "ollama":
+             backend = "ollama"
+
+         candidates.append(ModelEntry(
+             name=m.name,
+             backend=backend,  # Correctly mapped backend
+             model_id=m.name,  # Use registry name as ID
+             min_ram_gb=min_ram,
+             description="Locally installed model",
+             hw_tags=["cpu", "cuda", "mlx"] if m.format.value=="gguf" else ["cuda"]
+         ))
+
+     mapping = {}
+
+     if mode == "single":
+         best = candidates[0]
+         mapping = {"small": best, "mid": best, "big": best}
+
+     elif mode == "dual":
+         if len(candidates) < 2: return None
+         big = candidates[0]
+         mid = candidates[1]
+         mapping = {"small": mid, "mid": mid, "big": big}
+
+     elif mode == "triple":
+         if len(candidates) < 3: return None
+         big = candidates[0]
+         mid = candidates[1]
+         small = candidates[-1]  # Smallest
+         mapping = {"small": small, "mid": mid, "big": big}
+
+     return mapping
+
+ def pick_model_smart(prompt: str, tier: str, hw_stats: HardwareStats, manager: ModelManager) -> ModelEntry:
+     """Smart selection table mixing Local and Marketplace."""
+
+     # 1. Market Candidates
+     candidates = MODELS.get(tier, MODELS["entry"])
+     preferred_tag = "mlx" if hw_stats.is_apple_silicon else "cuda" if hw_stats.gpu_name else "cpu"
+
+     market_primary = [m for m in candidates if preferred_tag in m.hw_tags]
+     market_secondary = [m for m in candidates if preferred_tag not in m.hw_tags and "cpu" in m.hw_tags]
+     market_list = market_primary + market_secondary
+
+     # 2. Local candidates: rather than listing every local file separately,
+     # show the curated market list and mark the entries that are already installed.
+     # Non-catalog local models are intentionally excluded here; sticking to the
+     # curated list keeps the selection stable.
+
+     console.print(f"\n[bold underline]{prompt}[/bold underline]")
+
+     table = Table(box=box.SIMPLE_HEAD)
+     table.add_column("ID", style="dim", width=3)
+     table.add_column("Model Name", style="bold")
+     table.add_column("Type", width=8)
+     table.add_column("Status", width=12)
+     table.add_column("Description")
+
+     options = []
+
+     # Process Market Options
+     for i, m in enumerate(market_list):
+         options.append(m)
+         path = manager.get_model_path(m.model_id)
+         status = "[green]✓ Installed[/green]" if path and path.exists() else "[dim]Download[/dim]"
+         table.add_row(str(i+1), m.name, m.backend, status, m.description)
+
+     # A "use any local model" option is deliberately omitted here: listing every
+     # local file gets noisy for large libraries, and detecting non-catalog models
+     # adds complexity. For now the table shows only catalog entries together with
+     # their installed/download status.
+
+     console.print(table)
+
+     # Choice logic
+     while True:
+         choice = Prompt.ask("Select Model", choices=[str(i) for i in range(1, len(options)+1)], default="1")
+         try:
+             return options[int(choice)-1]
+         except (ValueError, IndexError):
+             console.print("[red]Invalid selection[/red]")
+
+
+ def resolve_slots_ui(mode: str, tier: str, hw_stats: HardwareStats, manager: ModelManager) -> Dict[str, ModelEntry]:
+     selected = {}
+
+     display_mode = {"single": "Single Mode", "dual": "Dual Mode", "triple": "Triple Mode"}
+     console.print(f"\n[bold cyan]Configuring: {display_mode.get(mode, mode)}[/bold cyan]")
+
+     if mode == "single":
+         m = pick_model_smart("Select Main Model (Shared)", tier, hw_stats, manager)
+         selected = {"small": m, "mid": m, "big": m}
+
+     elif mode == "dual":
+         mid = pick_model_smart("Select Worker Model", tier, hw_stats, manager)
+         big = pick_model_smart("Select Planner Model", tier, hw_stats, manager)
+         selected = {"small": mid, "mid": mid, "big": big}
+
+     elif mode == "triple":
+         if tier == "entry":
+             # Entry tier triple mode forces small models
+             t_small, t_mid, t_big = "entry", "entry", "entry"
+         else:
+             t_small, t_mid, t_big = "entry", "mid", "high"
+
+         small = pick_model_smart("Select Fast Model (Refiner)", t_small, hw_stats, manager)
+         mid = pick_model_smart("Select Worker Model", t_mid, hw_stats, manager)
+         big = pick_model_smart("Select Planner Model", t_big, hw_stats, manager)
+         selected = {"small": small, "mid": mid, "big": big}
+
+     return selected
+
+ def download_phase(selected: Dict[str, ModelEntry], manager: ModelManager):
+     """Refined download phase."""
+     unique = {m.model_id: m for m in selected.values()}
+
+     to_download = []
+     for m in unique.values():
+         p = manager.get_model_path(m.model_id)
+         if not p or not p.exists():
+             to_download.append(m)
+
+     if not to_download:
+         console.print("\n[green]All models available locally. Skipping download.[/green]")
+         return
+
+     console.print(f"\n[bold yellow]Initiating Download ({len(to_download)} models)[/bold yellow]")
+
+     progress = Progress(
+         SpinnerColumn(),
+         TextColumn("[bold blue]{task.fields[name]}"),
+         BarColumn(),
+         "[progress.percentage]{task.percentage:>3.0f}%",
+         DownloadColumn(),
+         TransferSpeedColumn(),
+         console=console
+     )
+
+     with progress:
+         for m in to_download:
+             tid = progress.add_task("download", name=m.name, total=None)
+
+             def cb(p: DownloadProgress):
+                 progress.update(tid, total=p.total_bytes, completed=p.downloaded_bytes)
+
+             try:
+                 # Map backend
+                 src = "auto"
+                 if m.backend == "transformers": src = "huggingface"
+                 elif m.backend == "ollama": src = "ollama"
+
+                 manager.download(m.model_id, source=src, progress_callback=cb)
+                 progress.update(tid, description=f"[green]✓ {m.name}[/green]")
+             except Exception as e:
+                 progress.update(tid, description=f"[red]✗ {m.name} Failed[/red]")
+                 console.print(f"[red]Error downloading {m.name}: {e}[/red]")
+
+
+ def main():
+     # 0. Init
+     manager = ModelManager()
+     console.print("[dim]Scanning local library...[/dim]")
+     manager.scan_for_models()
+     removed = manager.registry.verify_integrity()
+     if removed > 0:
+         console.print(f"[dim]Pruned {removed} invalid entries from registry.[/dim]")
+
+     stats = get_hardware_stats()
+
+     # 1. Dashboard
+     print_dashboard(stats, manager)
+     if not Confirm.ask("Begin Setup?", default=True): return
+
+     # 2. Sabha
+     console.print("\n[bold]1. Council Configuration[/bold]")
+     for key, s in SABHAS.items():
+         console.print(f"[cyan]{s.name}[/cyan] ({len(s.roles)} Roles): {s.description}")
+
+     sabha = SABHAS[Prompt.ask("Choice", choices=["laghu", "mantri", "maha"], default="laghu")]
+
+     # 3. Strategy
+     console.print("\n[bold]2. Model Strategy[/bold]")
+     console.print(Panel(
+         "[bold]Single Mode:[/bold] 1 Shared Model (Simple, Less Memory)\n"
+         "[bold]Dual Mode:[/bold] 1 Mid (Worker) + 1 Heavy (Planner/Judge)\n"
+         "[bold]Triple Mode:[/bold] 1 Light (Refiner) + 1 Mid (Worker) + 1 Heavy (Planner)",
+         title="Strategies", border_style="cyan"
+     ))
+     mode = Prompt.ask("Strategy", choices=["single", "dual", "triple"], default="triple" if stats.tier == "high" else "single")
+
+     # 4. Auto-Detect / Allocation
+     local_inventory = manager.list_models()
+     suggestion = None
+
+     if len(local_inventory) >= (3 if mode=="triple" else 1):
+         suggestion = suggest_configuration_from_local(local_inventory, mode, stats.tier)
+
+     selected_slots = {}
+
+     if suggestion:
+         console.print(Panel(
+             "\n".join([f"[bold]{k.upper()}:[/bold] {v.name}" for k,v in suggestion.items()]),
+             title="[bold green]Auto-Configuration Available[/bold green]",
+             border_style="green"
+         ))
+         if Confirm.ask("Use this configuration? (Uses existing models)", default=True):
+             selected_slots = suggestion
+
+     if not selected_slots:
+         # Manual Selection Flow
+         selected_slots = resolve_slots_ui(mode, stats.tier, stats, manager)
+
+     # 5. Download
+     download_phase(selected_slots, manager)
+
+     # 6. Generate Config
+     # Reuse existing generate_config logic but inline here for simplicity/import
+     cfg = {"slots": {}}
+     for slot, m in selected_slots.items():
+         # Backend resolution logic
+         bk = "llama_cpp" if m.backend in ["llama_cpp", "lm_studio", "ollama"] else m.backend
+         path = m.model_id
+
+         # Resolve path
+         rp = manager.get_model_path(m.model_id)
+         if rp:
+             path = str(rp)
+             if str(rp).endswith(".gguf"): bk = "llama_cpp"
+
+         cfg["slots"][slot] = {
+             "backend": bk,
+             "model_id": path,
+             "context_length": sabha.min_tokens_req,
+             "temperature": 0.5
+         }
+
+     with open(DEFAULT_CONFIG_PATH, "w") as f:
+         yaml.dump(cfg, f, sort_keys=False)
+
+     console.print(f"\n[bold green]Configuration saved to {DEFAULT_CONFIG_PATH}[/bold green]")
+
+     # 7. Launch
+     console.print(Panel("[bold white]Setup Complete[/bold white]", style="green"))
+     if Confirm.ask("Launch Prarambh?", default=True):
+         console.clear()
+         run_prarambh()
+
+ if __name__ == "__main__":
+     main()
@@ -0,0 +1,139 @@
+ """
+ Centralized mode configuration for Parishad Sabha.
+
+ Maps modes to pipeline configurations and role structures.
+ This is the single source of truth for the simplified 1/2/3 role system.
+ """
+
+ from dataclasses import dataclass
+ from typing import List, Dict
+
+
+ @dataclass
+ class ModeConfig:
+     """Configuration for a Parishad execution mode/Sabha."""
+     mode_key: str         # CLI mode key: "fast", "balanced", "thorough"
+     sabha_id: str         # Sabha ID for TUI: "laghu", "madhyam", "maha"
+     sabha_name: str       # Display name
+     sabha_hindi: str      # Hindi name
+     pipeline_config: str  # Pipeline file name: "fast", "core", "extended"
+
+     # Role structure
+     role_count: int
+     role_names: List[str]  # Actual role class names
+
+     # Display metadata
+     description: str
+     ram_gb: int
+     speed_label: str
+     emoji: str
+     model_slots: List[str]
+
+
+ # Single source of truth: Mode definitions with 1/2/3 role structure
+ MODES: Dict[str, ModeConfig] = {
+     "fast": ModeConfig(
+         mode_key="fast",
+         sabha_id="laghu",
+         sabha_name="Laghu Sabha",
+         sabha_hindi="लघु सभा",
+         pipeline_config="fast",
+         role_count=5,
+         role_names=["Darbari", "Sainik", "Raja"],  # Representative list
+         description="Fastest - optimized council (5 roles)",
+         ram_gb=8,
+         speed_label="Fast",
+         emoji="🚀",
+         model_slots=["single"]
+     ),
+
+     "balanced": ModeConfig(
+         mode_key="balanced",
+         sabha_id="madhyam",
+         sabha_name="Madhyam Sabha",
+         sabha_hindi="मध्यम सभा",
+         pipeline_config="core",
+         role_count=8,
+         role_names=["Darbari", "Majumdar", "Sainik", "Prerak"],
+         description="Balanced - full core council (8 roles)",
+         ram_gb=16,
+         speed_label="Medium",
+         emoji="⚡",
+         model_slots=["heavy", "light"]
+     ),
+
+     "thorough": ModeConfig(
+         mode_key="thorough",
+         sabha_id="maha",
+         sabha_name="Maha Sabha",
+         sabha_hindi="महा सभा",
+         pipeline_config="extended",
+         role_count=10,
+         role_names=["Pantapradhan", "Vidushak", "Sainik", "Raja"],
+         description="Thorough - extended council (10 roles)",
+         ram_gb=32,
+         speed_label="Slow",
+         emoji="👑",
+         model_slots=["heavy", "mid", "light"]
+     ),
+ }
+
+
+ # Backward compatibility mappings
+ SABHA_ID_TO_MODE = {
+     "laghu": "fast",
+     "madhyam": "balanced",
+     "maha": "thorough",
+ }
+
+ MODE_TO_SABHA_ID = {
+     "fast": "laghu",
+     "balanced": "madhyam",
+     "thorough": "maha",
+     # Also support old names if they exist
+     "core": "madhyam",
+     "extended": "maha",
+ }
+
+
+ def get_mode_config(mode_or_sabha: str) -> ModeConfig:
+     """
+     Get mode configuration by mode key or Sabha ID.
+
+     Args:
+         mode_or_sabha: Mode key ("fast"/"balanced"/"thorough") or
+                        Sabha ID ("laghu"/"madhyam"/"maha") or
+                        Old name ("core"/"extended")
+
+     Returns:
+         ModeConfig for the requested mode
+
+     Raises:
+         ValueError: If mode/sabha is unknown
+     """
+     # Direct mode lookup
+     if mode_or_sabha in MODES:
+         return MODES[mode_or_sabha]
+
+     # Sabha ID lookup
+     if mode_or_sabha in SABHA_ID_TO_MODE:
+         mode_key = SABHA_ID_TO_MODE[mode_or_sabha]
+         return MODES[mode_key]
+
+     # Old pipeline name lookup
+     if mode_or_sabha in MODE_TO_SABHA_ID:
+         sabha_id = MODE_TO_SABHA_ID[mode_or_sabha]
+         mode_key = SABHA_ID_TO_MODE[sabha_id]
+         return MODES[mode_key]
+
+     raise ValueError(
+         f"Unknown mode/sabha: '{mode_or_sabha}'. "
+         f"Valid modes: {list(MODES.keys())}, "
+         f"sabha IDs: {list(SABHA_ID_TO_MODE.keys())}"
+     )
+
+
+ def get_pipeline_name(mode_or_sabha: str) -> str:
+     """Get pipeline config file name for a mode."""
+     config = get_mode_config(mode_or_sabha)
+     return config.pipeline_config
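
For reference, a minimal usage sketch of the lookup helpers defined in parishad/config/modes.py above, following the resolution order shown in get_mode_config. The calls and printed values below are illustrative only and not part of the published wheel:

    from parishad.config.modes import get_mode_config, get_pipeline_name

    cfg = get_mode_config("laghu")           # Sabha ID resolves via SABHA_ID_TO_MODE to the "fast" mode
    print(cfg.sabha_name, cfg.role_count)    # Laghu Sabha 5
    print(get_pipeline_name("extended"))     # legacy name -> "maha" -> "thorough" -> pipeline "extended"
    get_mode_config("bogus")                 # raises ValueError listing valid modes and Sabha IDs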