parishad 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. parishad/__init__.py +70 -0
  2. parishad/__main__.py +10 -0
  3. parishad/checker/__init__.py +25 -0
  4. parishad/checker/deterministic.py +644 -0
  5. parishad/checker/ensemble.py +496 -0
  6. parishad/checker/retrieval.py +546 -0
  7. parishad/cli/__init__.py +6 -0
  8. parishad/cli/code.py +3254 -0
  9. parishad/cli/main.py +1158 -0
  10. parishad/cli/prarambh.py +99 -0
  11. parishad/cli/sthapana.py +368 -0
  12. parishad/config/modes.py +139 -0
  13. parishad/config/pipeline.core.yaml +128 -0
  14. parishad/config/pipeline.extended.yaml +172 -0
  15. parishad/config/pipeline.fast.yaml +89 -0
  16. parishad/config/user_config.py +115 -0
  17. parishad/data/catalog.py +118 -0
  18. parishad/data/models.json +108 -0
  19. parishad/memory/__init__.py +79 -0
  20. parishad/models/__init__.py +181 -0
  21. parishad/models/backends/__init__.py +247 -0
  22. parishad/models/backends/base.py +211 -0
  23. parishad/models/backends/huggingface.py +318 -0
  24. parishad/models/backends/llama_cpp.py +239 -0
  25. parishad/models/backends/mlx_lm.py +141 -0
  26. parishad/models/backends/ollama.py +253 -0
  27. parishad/models/backends/openai_api.py +193 -0
  28. parishad/models/backends/transformers_hf.py +198 -0
  29. parishad/models/costs.py +385 -0
  30. parishad/models/downloader.py +1557 -0
  31. parishad/models/optimizations.py +871 -0
  32. parishad/models/profiles.py +610 -0
  33. parishad/models/reliability.py +876 -0
  34. parishad/models/runner.py +651 -0
  35. parishad/models/tokenization.py +287 -0
  36. parishad/orchestrator/__init__.py +24 -0
  37. parishad/orchestrator/config_loader.py +210 -0
  38. parishad/orchestrator/engine.py +1113 -0
  39. parishad/orchestrator/exceptions.py +14 -0
  40. parishad/roles/__init__.py +71 -0
  41. parishad/roles/base.py +712 -0
  42. parishad/roles/dandadhyaksha.py +163 -0
  43. parishad/roles/darbari.py +246 -0
  44. parishad/roles/majumdar.py +274 -0
  45. parishad/roles/pantapradhan.py +150 -0
  46. parishad/roles/prerak.py +357 -0
  47. parishad/roles/raja.py +345 -0
  48. parishad/roles/sacheev.py +203 -0
  49. parishad/roles/sainik.py +427 -0
  50. parishad/roles/sar_senapati.py +164 -0
  51. parishad/roles/vidushak.py +69 -0
  52. parishad/tools/__init__.py +7 -0
  53. parishad/tools/base.py +57 -0
  54. parishad/tools/fs.py +110 -0
  55. parishad/tools/perception.py +96 -0
  56. parishad/tools/retrieval.py +74 -0
  57. parishad/tools/shell.py +103 -0
  58. parishad/utils/__init__.py +7 -0
  59. parishad/utils/hardware.py +122 -0
  60. parishad/utils/logging.py +79 -0
  61. parishad/utils/scanner.py +164 -0
  62. parishad/utils/text.py +61 -0
  63. parishad/utils/tracing.py +133 -0
  64. parishad-0.1.0.dist-info/METADATA +256 -0
  65. parishad-0.1.0.dist-info/RECORD +68 -0
  66. parishad-0.1.0.dist-info/WHEEL +4 -0
  67. parishad-0.1.0.dist-info/entry_points.txt +2 -0
  68. parishad-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,610 @@
1
+ """
2
+ Profile management for Parishad model configuration.
3
+
4
+ Provides:
5
+ - ProfileManager: Central class for profile switching and management
6
+ - Environment detection: Auto-detect available backends and hardware
7
+ - Graceful fallbacks: Fall back to simpler profiles when backends unavailable
8
+ - Profile validation: Validate profiles before loading
9
+
10
+ Environment Variables:
11
+ - PARISHAD_PROFILE: Override default profile selection
12
+ - PARISHAD_CONFIG_PATH: Override default config file path
13
+ """
14
+
15
+ from __future__ import annotations
16
+
17
+ import logging
18
+ import os
19
+ from dataclasses import dataclass, field
20
+ from enum import Enum
21
+ from pathlib import Path
22
+ from typing import Any, Callable, Optional
23
+
24
+ from .backends import get_available_backends, is_backend_available
25
+
26
+
27
+ logger = logging.getLogger(__name__)
28
+
29
+
30
+ # =============================================================================
31
+ # Enums and Types
32
+ # =============================================================================
33
+
34
+
35
class ProfileMode(Enum):
    """Profile execution modes — where model inference is expected to run."""
    LOCAL = "local"  # Local model inference
    API = "api"  # API-based inference
    HYBRID = "hybrid"  # Mix of local and API
40
+
41
+
42
class HardwareCapability(Enum):
    """Detected hardware capabilities (set by detect_environment via torch probes)."""
    CPU_ONLY = "cpu_only"  # torch present but no GPU backend available
    NVIDIA_GPU = "nvidia_gpu"  # CUDA device detected
    AMD_GPU = "amd_gpu"  # ROCm/HIP device detected
    APPLE_SILICON = "apple_silicon"  # Metal/MPS backend available
    UNKNOWN = "unknown"  # default; kept when torch is not importable
49
+
50
+
51
+ # =============================================================================
52
+ # Environment Detection
53
+ # =============================================================================
54
+
55
+
56
@dataclass
class EnvironmentInfo:
    """
    Snapshot of the detected runtime environment.

    Records which backends are importable, what hardware was found, which
    API keys are present, and basic system resources. Defaults describe a
    conservative, undetected environment.
    """
    available_backends: list[str] = field(default_factory=list)
    hardware: HardwareCapability = HardwareCapability.UNKNOWN
    gpu_memory_gb: Optional[float] = None
    cpu_cores: int = 1
    system_memory_gb: float = 8.0

    # API key availability
    has_openai_key: bool = False
    has_anthropic_key: bool = False

    # Python packages
    has_torch: bool = False
    has_transformers: bool = False
    has_llama_cpp: bool = False
    has_tiktoken: bool = False

    @property
    def can_run_local(self) -> bool:
        """True when at least one local-inference package is importable."""
        return any((self.has_llama_cpp, self.has_transformers))

    @property
    def can_run_api(self) -> bool:
        """True when at least one API key is configured."""
        return any((self.has_openai_key, self.has_anthropic_key))

    @property
    def has_gpu(self) -> bool:
        """True when the detected hardware is any kind of GPU."""
        gpu_kinds = {
            HardwareCapability.NVIDIA_GPU,
            HardwareCapability.AMD_GPU,
            HardwareCapability.APPLE_SILICON,
        }
        return self.hardware in gpu_kinds
97
+
98
+
99
def detect_environment() -> EnvironmentInfo:
    """
    Detect the current environment capabilities.

    Probes available backends, GPU hardware (via torch, when installed),
    API keys (OPENAI_API_KEY / ANTHROPIC_API_KEY), and system resources.
    All probes are best-effort: missing optional packages leave the
    corresponding EnvironmentInfo defaults in place.

    Returns:
        EnvironmentInfo with detected capabilities
    """
    info = EnvironmentInfo()

    # Get available backends
    info.available_backends = get_available_backends()

    # Check backend-specific packages
    info.has_llama_cpp = is_backend_available("llama_cpp")

    try:
        import torch
        info.has_torch = True

        # Detect GPU.
        # NOTE: ROCm builds of torch expose the CUDA API, so
        # torch.cuda.is_available() is True on AMD GPUs as well;
        # torch.version.hip is the documented way to tell them apart.
        # (The previous `hasattr(torch, 'hip')` branch could never match —
        # torch has no `hip` attribute — so AMD was reported as NVIDIA.)
        if torch.cuda.is_available():
            if getattr(torch.version, "hip", None):
                info.hardware = HardwareCapability.AMD_GPU
            else:
                info.hardware = HardwareCapability.NVIDIA_GPU
            try:
                info.gpu_memory_gb = torch.cuda.get_device_properties(0).total_memory / (1024**3)
            except Exception:
                # Device property query can fail on exotic setups; memory stays None.
                pass
        elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
            info.hardware = HardwareCapability.APPLE_SILICON
        else:
            info.hardware = HardwareCapability.CPU_ONLY
    except ImportError:
        # torch missing: hardware remains UNKNOWN (we cannot probe the GPU).
        pass

    try:
        import transformers  # noqa: F401
        info.has_transformers = True
    except ImportError:
        pass

    try:
        import tiktoken  # noqa: F401
        info.has_tiktoken = True
    except ImportError:
        pass

    # Check API keys (presence only — values are never read here)
    info.has_openai_key = bool(os.environ.get("OPENAI_API_KEY"))
    info.has_anthropic_key = bool(os.environ.get("ANTHROPIC_API_KEY"))

    # System resources
    info.cpu_cores = os.cpu_count() or 1

    try:
        import psutil
        info.system_memory_gb = psutil.virtual_memory().total / (1024**3)
    except ImportError:
        # psutil optional: keep the conservative 8 GB default.
        pass

    return info
160
+
161
+
162
+ # =============================================================================
163
+ # Profile Definitions
164
+ # =============================================================================
165
+
166
+
167
@dataclass
class ProfileDefinition:
    """
    Declarative description of a profile: what it needs and how it degrades.

    `check_requirements` compares the declared needs against a detected
    environment; `fallback_chain` names profiles to try, in order, when
    this one cannot be used.
    """
    name: str
    mode: ProfileMode
    description: str

    # Required backends
    required_backends: list[str] = field(default_factory=list)

    # Fallback chain - try these profiles in order if this one fails
    fallback_chain: list[str] = field(default_factory=list)

    # Minimum requirements
    min_memory_gb: float = 0.0
    requires_gpu: bool = False
    requires_api_key: Optional[str] = None

    # Priority (higher = preferred)
    priority: int = 0

    def check_requirements(self, env: EnvironmentInfo) -> tuple[bool, str]:
        """
        Check whether *env* satisfies every requirement of this profile.

        Args:
            env: Environment info

        Returns:
            Tuple of (is_compatible, reason_if_not); the reason is "" on success.
        """
        # Backends: report the first one that is missing.
        missing = [b for b in self.required_backends if b not in env.available_backends]
        if missing:
            return False, f"Backend '{missing[0]}' not available"

        # GPU requirement
        if self.requires_gpu and not env.has_gpu:
            return False, "GPU required but not available"

        # Memory requirement (0.0 means "no minimum")
        if 0 < self.min_memory_gb > env.system_memory_gb:
            return False, f"Requires {self.min_memory_gb}GB RAM, have {env.system_memory_gb:.1f}GB"

        # API-key requirement
        if self.requires_api_key == "openai" and not env.has_openai_key:
            return False, "OPENAI_API_KEY not set"
        if self.requires_api_key == "anthropic" and not env.has_anthropic_key:
            return False, "ANTHROPIC_API_KEY not set"

        return True, ""
220
+
221
+
222
# Built-in profile definitions, keyed by profile name.
#
# NOTE: insertion order matters — auto-selection sorts by priority with a
# stable sort, so among equal-priority profiles the one listed first wins.
# Priorities: openai/anthropic (50) > local_large (30) > local_gpu (25)
# > local_medium (20) > transformers (15) > local_small (10).
BUILTIN_PROFILES: dict[str, ProfileDefinition] = {
    "local_gpu": ProfileDefinition(
        name="local_gpu",
        mode=ProfileMode.LOCAL,
        description="Local inference with GPU acceleration (Metal/CUDA)",
        required_backends=["llama_cpp"],
        fallback_chain=["local_small"],
        min_memory_gb=4.0,
        requires_gpu=True,
        priority=25,  # Higher than local_small, will be preferred
    ),
    "local_small": ProfileDefinition(
        name="local_small",
        mode=ProfileMode.LOCAL,
        description="Local inference with small models (1.5B-7B)",
        required_backends=["llama_cpp"],
        fallback_chain=[],  # last resort among local profiles — nowhere further to fall
        min_memory_gb=4.0,
        priority=10,
    ),
    "local_medium": ProfileDefinition(
        name="local_medium",
        mode=ProfileMode.LOCAL,
        description="Local inference with medium models (7B-14B)",
        required_backends=["llama_cpp"],
        fallback_chain=["local_small"],
        min_memory_gb=8.0,
        priority=20,
    ),
    "local_large": ProfileDefinition(
        name="local_large",
        mode=ProfileMode.LOCAL,
        description="Local inference with large models (14B+)",
        required_backends=["llama_cpp"],
        fallback_chain=["local_medium", "local_small"],
        min_memory_gb=16.0,
        requires_gpu=True,
        priority=30,
    ),
    "transformers": ProfileDefinition(
        name="transformers",
        mode=ProfileMode.LOCAL,
        description="HuggingFace Transformers models",
        required_backends=["transformers"],
        fallback_chain=["local_small"],
        min_memory_gb=8.0,
        priority=15,
    ),
    "openai": ProfileDefinition(
        name="openai",
        mode=ProfileMode.API,
        description="OpenAI API models (GPT-4, etc.)",
        required_backends=["openai"],
        fallback_chain=[],
        requires_api_key="openai",
        priority=50,
    ),
    "anthropic": ProfileDefinition(
        name="anthropic",
        mode=ProfileMode.API,
        description="Anthropic API models (Claude, etc.)",
        required_backends=[],  # No special backend needed
        fallback_chain=["openai"],
        requires_api_key="anthropic",
        priority=50,
    ),
}
290
+
291
+
292
+ # =============================================================================
293
+ # Profile Manager
294
+ # =============================================================================
295
+
296
+
297
class ProfileManager:
    """
    Central manager for profile switching and management.

    Handles:
    - Profile selection based on environment
    - Graceful fallbacks when backends unavailable
    - Environment variable overrides (PARISHAD_PROFILE, PARISHAD_CONFIG_PATH)
    - Profile validation

    Usage:
        manager = ProfileManager()
        profile = manager.select_profile()  # Auto-select best profile
        runner = manager.create_runner(profile)
    """

    def __init__(
        self,
        config_path: Optional[str | Path] = None,
        env_info: Optional[EnvironmentInfo] = None,
    ):
        """
        Initialize ProfileManager.

        Args:
            config_path: Path to models.yaml config file. When omitted, the
                PARISHAD_CONFIG_PATH environment variable is consulted.
            env_info: Pre-detected environment info (auto-detects if None)
        """
        self.config_path = Path(config_path) if config_path else None
        self.env_info = env_info or detect_environment()
        # Copy so register_profile() never mutates the shared BUILTIN_PROFILES.
        self._profiles: dict[str, ProfileDefinition] = BUILTIN_PROFILES.copy()
        self._loaded_profiles: dict[str, Any] = {}

        # Check for config path override
        if not self.config_path:
            env_path = os.environ.get("PARISHAD_CONFIG_PATH")
            if env_path:
                self.config_path = Path(env_path)

    def register_profile(self, profile: ProfileDefinition) -> None:
        """
        Register a custom profile definition (replaces any same-named profile).

        Args:
            profile: Profile definition to register
        """
        self._profiles[profile.name] = profile
        logger.debug("Registered profile: %s", profile.name)

    def get_profile_definition(self, name: str) -> Optional[ProfileDefinition]:
        """Get profile definition by name, or None if unknown."""
        return self._profiles.get(name)

    def list_profiles(self) -> list[str]:
        """List all registered profile names."""
        return list(self._profiles.keys())

    def list_compatible_profiles(self) -> list[str]:
        """List profiles whose requirements the current environment meets."""
        compatible = []
        for name, profile in self._profiles.items():
            is_compatible, _ = profile.check_requirements(self.env_info)
            if is_compatible:
                compatible.append(name)
        return compatible

    def validate_profile(self, name: str) -> tuple[bool, str]:
        """
        Validate that a profile can be used.

        Args:
            name: Profile name to validate

        Returns:
            Tuple of (is_valid, reason_if_invalid)
        """
        profile = self._profiles.get(name)
        if not profile:
            return False, f"Unknown profile: {name}"

        return profile.check_requirements(self.env_info)

    def select_profile(
        self,
        preferred: Optional[str] = None,
        mode: Optional[ProfileMode] = None,
        allow_fallback: bool = True,
    ) -> str:
        """
        Select the best profile for the current environment.

        Selection order: the PARISHAD_PROFILE environment variable, then
        `preferred` (and its declared fallback chain), then the compatible
        profile with the highest priority.

        Args:
            preferred: Preferred profile name (overrides auto-selection)
            mode: Preferred mode (LOCAL, API, etc.)
            allow_fallback: If True, fall back to compatible profile if preferred fails

        Returns:
            Selected profile name

        Raises:
            RuntimeError: If no compatible profile found
        """
        # Check environment variable override
        env_profile = os.environ.get("PARISHAD_PROFILE")
        if env_profile:
            is_valid, reason = self.validate_profile(env_profile)
            if is_valid:
                logger.info("Using profile from PARISHAD_PROFILE: %s", env_profile)
                return env_profile
            elif not allow_fallback:
                raise RuntimeError(
                    f"PARISHAD_PROFILE={env_profile} is not compatible: {reason}"
                )
            logger.warning(
                "PARISHAD_PROFILE=%s not compatible (%s), falling back to auto-selection",
                env_profile,
                reason,
            )

        # Try preferred profile, then its declared fallback chain
        if preferred:
            is_valid, reason = self.validate_profile(preferred)
            if is_valid:
                return preferred
            elif not allow_fallback:
                raise RuntimeError(f"Profile '{preferred}' not compatible: {reason}")

            profile_def = self._profiles.get(preferred)
            if profile_def:
                for fallback in profile_def.fallback_chain:
                    is_valid, _ = self.validate_profile(fallback)
                    if is_valid:
                        logger.warning(
                            "Profile '%s' not available (%s), falling back to '%s'",
                            preferred,
                            reason,
                            fallback,
                        )
                        return fallback

        # Auto-select: gather compatible profiles, optionally filtered by mode
        compatible = []
        for name, profile in self._profiles.items():
            is_valid, _ = profile.check_requirements(self.env_info)
            if not is_valid:
                continue
            if mode and profile.mode != mode:
                continue
            compatible.append((name, profile))

        if not compatible:
            if mode:
                raise RuntimeError(
                    f"No compatible profiles found for mode '{mode.value}'. "
                    f"Available backends: {self.env_info.available_backends}"
                )
            raise RuntimeError(
                "No compatible profiles found. "
                f"Available backends: {self.env_info.available_backends}"
            )

        # Highest priority wins; the sort is stable, so among equal priorities
        # registration order breaks the tie.
        compatible.sort(key=lambda item: item[1].priority, reverse=True)

        selected = compatible[0][0]
        logger.info("Auto-selected profile: %s", selected)
        return selected

    def create_runner(
        self,
        profile_name: Optional[str] = None,
        **kwargs,
    ) -> "ModelRunner":  # type: ignore
        """
        Create a ModelRunner with the specified or auto-selected profile.

        Args:
            profile_name: Profile to use (auto-selects if None)
            **kwargs: Additional arguments for ModelRunner

        Returns:
            Configured ModelRunner
        """
        from .runner import ModelRunner

        # Select profile if not specified
        if not profile_name:
            profile_name = self.select_profile()

        # Load from config file when one is available
        if self.config_path and self.config_path.exists():
            return ModelRunner.from_profile(profile_name, self.config_path, **kwargs)

        # No usable config file: fall back to a default-configured runner.
        # (The old message claimed "no profile selected", which was false —
        # a profile has always been selected by this point.)
        logger.warning(
            "No config file found for profile '%s' (config_path=%s). "
            "Set PARISHAD_CONFIG_PATH or provide config_path.",
            profile_name,
            self.config_path,
        )
        return ModelRunner(**kwargs)

    def get_profile_info(self, name: str) -> dict[str, Any]:
        """
        Get detailed information about a profile.

        Args:
            name: Profile name

        Returns:
            Dict with profile details, or {"error": ...} for unknown names
        """
        profile = self._profiles.get(name)
        if not profile:
            return {"error": f"Unknown profile: {name}"}

        is_compatible, reason = profile.check_requirements(self.env_info)

        return {
            "name": profile.name,
            "mode": profile.mode.value,
            "description": profile.description,
            "compatible": is_compatible,
            "incompatibility_reason": reason if not is_compatible else None,
            "required_backends": profile.required_backends,
            "fallback_chain": profile.fallback_chain,
            "priority": profile.priority,
        }

    def get_environment_summary(self) -> dict[str, Any]:
        """Get a summary of the detected environment."""
        return {
            "available_backends": self.env_info.available_backends,
            "hardware": self.env_info.hardware.value,
            "has_gpu": self.env_info.has_gpu,
            "gpu_memory_gb": self.env_info.gpu_memory_gb,
            "cpu_cores": self.env_info.cpu_cores,
            "system_memory_gb": round(self.env_info.system_memory_gb, 1),
            "can_run_local": self.env_info.can_run_local,
            "can_run_api": self.env_info.can_run_api,
            "api_keys": {
                "openai": self.env_info.has_openai_key,
                "anthropic": self.env_info.has_anthropic_key,
            },
        }
538
+
539
+
540
+ # =============================================================================
541
+ # Convenience Functions
542
+ # =============================================================================
543
+
544
+
545
def get_default_profile() -> str:
    """
    Return the default profile name for the current environment.

    Honors the PARISHAD_PROFILE environment variable first, then auto-selects.
    """
    return ProfileManager().select_profile()
553
+
554
+
555
def get_profile_manager(config_path: Optional[str | Path] = None) -> ProfileManager:
    """
    Build a ProfileManager, optionally pointed at a config file.

    Args:
        config_path: Optional path to models.yaml

    Returns:
        Configured ProfileManager
    """
    manager = ProfileManager(config_path=config_path)
    return manager
566
+
567
+
568
def quick_runner(
    profile: Optional[str] = None,
    config_path: Optional[str | Path] = None,
) -> "ModelRunner":  # type: ignore
    """
    Create a ready-to-use ModelRunner with sensible defaults.

    Args:
        profile: Profile name (auto-selects if None)
        config_path: Config file path

    Returns:
        Ready-to-use ModelRunner

    Example:
        runner = quick_runner("local_small", "config/models.yaml")
        text, tokens, model = runner.generate(
            "You are helpful.",
            "Hello!",
            Slot.SMALL
        )
    """
    return ProfileManager(config_path=config_path).create_runner(profile)
592
+
593
+
594
# Explicit public API of this module (controls `import *` and documents intent).
__all__ = [
    # Enums
    "ProfileMode",
    "HardwareCapability",
    # Data classes
    "EnvironmentInfo",
    "ProfileDefinition",
    # Manager
    "ProfileManager",
    # Functions
    "detect_environment",
    "get_default_profile",
    "get_profile_manager",
    "quick_runner",
    # Constants
    "BUILTIN_PROFILES",
]