invarlock 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132) hide show
  1. invarlock/__init__.py +33 -0
  2. invarlock/__main__.py +10 -0
  3. invarlock/_data/runtime/profiles/ci_cpu.yaml +15 -0
  4. invarlock/_data/runtime/profiles/release.yaml +23 -0
  5. invarlock/_data/runtime/tiers.yaml +76 -0
  6. invarlock/adapters/__init__.py +102 -0
  7. invarlock/adapters/_capabilities.py +45 -0
  8. invarlock/adapters/auto.py +99 -0
  9. invarlock/adapters/base.py +530 -0
  10. invarlock/adapters/base_types.py +85 -0
  11. invarlock/adapters/hf_bert.py +852 -0
  12. invarlock/adapters/hf_gpt2.py +403 -0
  13. invarlock/adapters/hf_llama.py +485 -0
  14. invarlock/adapters/hf_mixin.py +383 -0
  15. invarlock/adapters/hf_onnx.py +112 -0
  16. invarlock/adapters/hf_t5.py +137 -0
  17. invarlock/adapters/py.typed +1 -0
  18. invarlock/assurance/__init__.py +43 -0
  19. invarlock/cli/__init__.py +8 -0
  20. invarlock/cli/__main__.py +8 -0
  21. invarlock/cli/_evidence.py +25 -0
  22. invarlock/cli/_json.py +75 -0
  23. invarlock/cli/adapter_auto.py +162 -0
  24. invarlock/cli/app.py +287 -0
  25. invarlock/cli/commands/__init__.py +26 -0
  26. invarlock/cli/commands/certify.py +403 -0
  27. invarlock/cli/commands/doctor.py +1358 -0
  28. invarlock/cli/commands/explain_gates.py +151 -0
  29. invarlock/cli/commands/export_html.py +100 -0
  30. invarlock/cli/commands/plugins.py +1331 -0
  31. invarlock/cli/commands/report.py +354 -0
  32. invarlock/cli/commands/run.py +4146 -0
  33. invarlock/cli/commands/verify.py +1040 -0
  34. invarlock/cli/config.py +396 -0
  35. invarlock/cli/constants.py +68 -0
  36. invarlock/cli/device.py +92 -0
  37. invarlock/cli/doctor_helpers.py +74 -0
  38. invarlock/cli/errors.py +6 -0
  39. invarlock/cli/overhead_utils.py +60 -0
  40. invarlock/cli/provenance.py +66 -0
  41. invarlock/cli/utils.py +41 -0
  42. invarlock/config.py +56 -0
  43. invarlock/core/__init__.py +62 -0
  44. invarlock/core/abi.py +15 -0
  45. invarlock/core/api.py +274 -0
  46. invarlock/core/auto_tuning.py +317 -0
  47. invarlock/core/bootstrap.py +226 -0
  48. invarlock/core/checkpoint.py +221 -0
  49. invarlock/core/contracts.py +73 -0
  50. invarlock/core/error_utils.py +64 -0
  51. invarlock/core/events.py +298 -0
  52. invarlock/core/exceptions.py +95 -0
  53. invarlock/core/registry.py +481 -0
  54. invarlock/core/retry.py +146 -0
  55. invarlock/core/runner.py +2041 -0
  56. invarlock/core/types.py +154 -0
  57. invarlock/edits/__init__.py +12 -0
  58. invarlock/edits/_edit_utils.py +249 -0
  59. invarlock/edits/_external_utils.py +268 -0
  60. invarlock/edits/noop.py +47 -0
  61. invarlock/edits/py.typed +1 -0
  62. invarlock/edits/quant_rtn.py +801 -0
  63. invarlock/edits/registry.py +166 -0
  64. invarlock/eval/__init__.py +23 -0
  65. invarlock/eval/bench.py +1207 -0
  66. invarlock/eval/bootstrap.py +50 -0
  67. invarlock/eval/data.py +2052 -0
  68. invarlock/eval/metrics.py +2167 -0
  69. invarlock/eval/primary_metric.py +767 -0
  70. invarlock/eval/probes/__init__.py +24 -0
  71. invarlock/eval/probes/fft.py +139 -0
  72. invarlock/eval/probes/mi.py +213 -0
  73. invarlock/eval/probes/post_attention.py +323 -0
  74. invarlock/eval/providers/base.py +67 -0
  75. invarlock/eval/providers/seq2seq.py +111 -0
  76. invarlock/eval/providers/text_lm.py +113 -0
  77. invarlock/eval/providers/vision_text.py +93 -0
  78. invarlock/eval/py.typed +1 -0
  79. invarlock/guards/__init__.py +18 -0
  80. invarlock/guards/_contracts.py +9 -0
  81. invarlock/guards/invariants.py +640 -0
  82. invarlock/guards/policies.py +805 -0
  83. invarlock/guards/py.typed +1 -0
  84. invarlock/guards/rmt.py +2097 -0
  85. invarlock/guards/spectral.py +1419 -0
  86. invarlock/guards/tier_config.py +354 -0
  87. invarlock/guards/variance.py +3298 -0
  88. invarlock/guards_ref/__init__.py +15 -0
  89. invarlock/guards_ref/rmt_ref.py +40 -0
  90. invarlock/guards_ref/spectral_ref.py +135 -0
  91. invarlock/guards_ref/variance_ref.py +60 -0
  92. invarlock/model_profile.py +353 -0
  93. invarlock/model_utils.py +221 -0
  94. invarlock/observability/__init__.py +10 -0
  95. invarlock/observability/alerting.py +535 -0
  96. invarlock/observability/core.py +546 -0
  97. invarlock/observability/exporters.py +565 -0
  98. invarlock/observability/health.py +588 -0
  99. invarlock/observability/metrics.py +457 -0
  100. invarlock/observability/py.typed +1 -0
  101. invarlock/observability/utils.py +553 -0
  102. invarlock/plugins/__init__.py +12 -0
  103. invarlock/plugins/hello_guard.py +33 -0
  104. invarlock/plugins/hf_awq_adapter.py +82 -0
  105. invarlock/plugins/hf_bnb_adapter.py +79 -0
  106. invarlock/plugins/hf_gptq_adapter.py +78 -0
  107. invarlock/plugins/py.typed +1 -0
  108. invarlock/py.typed +1 -0
  109. invarlock/reporting/__init__.py +7 -0
  110. invarlock/reporting/certificate.py +3221 -0
  111. invarlock/reporting/certificate_schema.py +244 -0
  112. invarlock/reporting/dataset_hashing.py +215 -0
  113. invarlock/reporting/guards_analysis.py +948 -0
  114. invarlock/reporting/html.py +32 -0
  115. invarlock/reporting/normalizer.py +235 -0
  116. invarlock/reporting/policy_utils.py +517 -0
  117. invarlock/reporting/primary_metric_utils.py +265 -0
  118. invarlock/reporting/render.py +1442 -0
  119. invarlock/reporting/report.py +903 -0
  120. invarlock/reporting/report_types.py +278 -0
  121. invarlock/reporting/utils.py +175 -0
  122. invarlock/reporting/validate.py +631 -0
  123. invarlock/security.py +176 -0
  124. invarlock/sparsity_utils.py +323 -0
  125. invarlock/utils/__init__.py +150 -0
  126. invarlock/utils/digest.py +45 -0
  127. invarlock-0.2.0.dist-info/METADATA +586 -0
  128. invarlock-0.2.0.dist-info/RECORD +132 -0
  129. invarlock-0.2.0.dist-info/WHEEL +5 -0
  130. invarlock-0.2.0.dist-info/entry_points.txt +20 -0
  131. invarlock-0.2.0.dist-info/licenses/LICENSE +201 -0
  132. invarlock-0.2.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,530 @@
1
+ """
2
+ InvarLock Adapters Base
3
+ =======================
4
+
5
+ Base adapter interface and utilities for InvarLock adapters.
6
+ Simplified implementation for production framework.
7
+ """
8
+
9
+ import contextlib
10
+ import json
11
+ import time
12
+ from abc import ABC, abstractmethod
13
+ from enum import Enum
14
+ from pathlib import Path
15
+ from typing import Any
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+
20
+ from invarlock.utils import get_memory_usage
21
+
22
# Shorthand aliases for the torch types used throughout the adapter layer.
TensorType = torch.Tensor
ModuleType = nn.Module
24
+
25
+
26
def _collect_memory_usage() -> dict[str, float]:
    """Collect process (and optional CUDA) memory usage in megabytes."""
    usage = get_memory_usage()
    rss = float(usage.get("rss_mb", 0.0))
    stats: dict[str, float] = {
        # "memory_mb" mirrors the RSS figure for convenience.
        "memory_mb": rss,
        "rss_mb": rss,
        "vms_mb": float(usage.get("vms_mb", 0.0)),
    }
    if torch.cuda.is_available():
        mb = 1024**2
        stats["cuda_allocated_mb"] = torch.cuda.memory_allocated() / mb
        stats["cuda_reserved_mb"] = torch.cuda.memory_reserved() / mb
    return stats
41
+
42
+
43
class AdapterType(Enum):
    """Kinds of adapters supported by this module."""

    TRANSFORMER = "transformer"
    GENERIC = "generic"
48
+
49
+
50
class DeviceType(Enum):
    """Compute devices an adapter may target."""

    CPU = "cpu"
    CUDA = "cuda"
    AUTO = "auto"
56
+
57
+
58
class AdapterState(Enum):
    """Lifecycle states an adapter moves through."""

    INITIALIZED = "initialized"
    LOADED = "loaded"
    ERROR = "error"
64
+
65
+
66
class PerformanceMetrics:
    """Dict-like container mapping operation names to metric dicts."""

    def __init__(self) -> None:
        # operation name -> metrics dict for that operation
        self.metrics: dict = {}

    def __getitem__(self, key):
        # Unknown operations yield an empty dict rather than raising.
        return self.metrics.get(key, {})

    def __contains__(self, key):
        return key in self.metrics
77
+
78
+
79
class CacheConfig:
    """Settings for the adapter cache layer."""

    def __init__(
        self, enabled=True, max_size_mb=1024, ttl_seconds=3600, cache_dir=None
    ):
        # cache_dir of None means the cache is in-memory only.
        self.enabled = enabled
        self.max_size_mb = max_size_mb
        self.ttl_seconds = ttl_seconds
        self.cache_dir = cache_dir
89
+
90
+
91
class MonitorConfig:
    """Settings controlling performance/memory monitoring."""

    def __init__(
        self, enabled=True, track_performance=True, track_memory=True, log_level="INFO"
    ):
        self.enabled = enabled
        self.track_performance = track_performance
        self.track_memory = track_memory
        self.log_level = log_level
101
+
102
+
103
class AdapterInterface(ABC):
    """Contract every InvarLock adapter must implement."""

    @abstractmethod
    def load_model(self, model_id: str, **kwargs) -> ModuleType | Any:
        """Load and return the model identified by *model_id*."""

    @abstractmethod
    def generate(self, prompt: str, **kwargs) -> str:
        """Produce generated text for *prompt*."""

    @abstractmethod
    def tokenize(self, text: str, **kwargs) -> dict[str, Any]:
        """Return the tokenizer output mapping for *text*."""

    @abstractmethod
    def get_capabilities(self) -> dict[str, Any]:
        """Describe the features this adapter supports."""
125
+
126
+
127
class BaseAdapter(AdapterInterface):
    """Base adapter implementation with common lifecycle plumbing."""

    def __init__(self, config: dict[str, Any]):
        self.config = config
        self.state = AdapterState.INITIALIZED
        self._monitoring_enabled = False
        self._performance_metrics = PerformanceMetrics()

    def cleanup(self) -> None:
        """Release adapter resources (no-op by default)."""

    def enable_monitoring(self) -> None:
        """Turn on performance monitoring for this adapter."""
        self._monitoring_enabled = True

    def get_performance_metrics(self) -> PerformanceMetrics:
        """Return the accumulated performance metrics."""
        return self._performance_metrics

    def get_memory_usage(self) -> dict[str, float]:
        """Return current process (and CUDA, if present) memory usage."""
        return _collect_memory_usage()

    @abstractmethod
    def load_model(self, model_id: str, **kwargs) -> ModuleType | Any:
        """Load and return the model identified by *model_id*."""

    @abstractmethod
    def generate(self, prompt: str, **kwargs) -> str:
        """Produce generated text for *prompt*."""

    @abstractmethod
    def tokenize(self, text: str, **kwargs) -> dict[str, Any]:
        """Return the tokenizer output mapping for *text*."""

    @abstractmethod
    def get_capabilities(self) -> dict[str, Any]:
        """Describe the features this adapter supports."""
171
+
172
+
173
+ class AdapterConfig:
174
+ """Adapter configuration management."""
175
+
176
+ def __init__(
177
+ self,
178
+ name: str,
179
+ adapter_type: str,
180
+ version: str = "0.2.0",
181
+ device: dict[str, Any] | None = None,
182
+ cache: dict[str, Any] | None = None,
183
+ monitoring: dict[str, Any] | None = None,
184
+ optimization: dict[str, Any] | None = None,
185
+ ):
186
+ self.name = name
187
+ self.version = version
188
+ self.adapter_type = adapter_type
189
+ self.device = device or {"type": "auto"}
190
+ self.cache = cache or {"enabled": True}
191
+ self.monitoring = monitoring or {"enabled": True}
192
+ self.optimization = optimization or {"enabled": False}
193
+
194
+ def validate(self) -> dict[str, Any]:
195
+ """Validate configuration."""
196
+ valid = True
197
+ errors = []
198
+
199
+ if self.device.get("memory_fraction", 0.8) > 1.0:
200
+ valid = False
201
+ errors.append("memory_fraction must be <= 1.0")
202
+
203
+ return {"valid": valid, "errors": errors}
204
+
205
+ def resolve_device(self) -> str:
206
+ """Resolve device configuration."""
207
+ index = int(self.device.get("index", 0))
208
+
209
+ if torch.cuda.is_available():
210
+ device_count = torch.cuda.device_count()
211
+ if device_count <= 0:
212
+ return "cuda:0"
213
+ index = max(0, min(index, device_count - 1))
214
+ return f"cuda:{index}"
215
+
216
+ return "cpu"
217
+
218
+ def to_dict(self) -> dict[str, Any]:
219
+ """Convert to dictionary."""
220
+ return {
221
+ "name": self.name,
222
+ "version": self.version,
223
+ "adapter_type": self.adapter_type,
224
+ "device": self.device,
225
+ "cache": self.cache,
226
+ "monitoring": self.monitoring,
227
+ "optimization": self.optimization,
228
+ }
229
+
230
+ @classmethod
231
+ def from_dict(cls, config_dict: dict[str, Any]) -> "AdapterConfig":
232
+ """Create from dictionary."""
233
+ return cls(**config_dict)
234
+
235
+
236
class DeviceManager:
    """Device selection and memory-management utilities."""

    def __init__(self, device_config: dict[str, Any]):
        self.device_type = device_config.get("type", "auto")
        self.device_index = device_config.get("index", 0)
        self.memory_fraction = device_config.get("memory_fraction", 0.8)
        self.allow_growth = device_config.get("allow_growth", True)

    def get_available_devices(self) -> list[str]:
        """List usable device strings; CPU is always present and first."""
        cuda = (
            [f"cuda:{i}" for i in range(torch.cuda.device_count())]
            if torch.cuda.is_available()
            else []
        )
        return ["cpu", *cuda]

    def get_memory_info(self) -> dict[str, float]:
        """Report total/allocated/reserved memory (MB) for the active device."""
        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
            # An out-of-range configured index falls back to device 0.
            idx = (
                self.device_index
                if self.device_index < torch.cuda.device_count()
                else 0
            )
            mb = 1024**2
            props = torch.cuda.get_device_properties(idx)
            return {
                "total_mb": props.total_memory / mb,
                "allocated_mb": torch.cuda.memory_allocated(idx) / mb,
                "reserved_mb": torch.cuda.memory_reserved(idx) / mb,
            }
        # CPU fallback: approximate using process RSS/VMS figures.
        usage = _collect_memory_usage()
        return {
            "total_mb": usage.get("memory_mb", 0.0),
            "allocated_mb": usage.get("memory_mb", 0.0),
            "reserved_mb": usage.get("vms_mb", 0.0),
        }

    def set_memory_fraction(self, fraction: float) -> None:
        """Record the fraction of device memory the adapter may use."""
        self.memory_fraction = fraction

    def set_memory_growth(self, allow: bool) -> None:
        """Record whether the memory pool may grow on demand."""
        self.allow_growth = allow

    @contextlib.contextmanager
    def device_context(self, device: str):
        """Run the enclosed block with *device* active (no-op on CPU)."""
        if device.startswith("cuda") and torch.cuda.is_available():
            with torch.cuda.device(device):
                yield
        else:
            yield
291
+
292
+
293
+ class AdapterCache:
294
+ """Adapter caching functionality."""
295
+
296
+ def __init__(self, cache_config: dict[str, Any]):
297
+ self.enabled = cache_config.get("enabled", True)
298
+ self.max_size_mb = cache_config.get("max_size_mb", 1024)
299
+ self.ttl_seconds = cache_config.get("ttl_seconds", 3600)
300
+ self._cache: dict[str, Any] = {}
301
+ self._timestamps: dict[str, float] = {}
302
+
303
+ def put(self, key: str, value: Any):
304
+ """Put value in cache."""
305
+ if self.enabled:
306
+ self._cache[key] = value
307
+ self._timestamps[key] = time.time()
308
+
309
+ def get(self, key: str) -> Any | None:
310
+ """Get value from cache."""
311
+ if not self.enabled or key not in self._cache:
312
+ return None
313
+
314
+ # Check TTL
315
+ if time.time() - self._timestamps[key] > self.ttl_seconds:
316
+ del self._cache[key]
317
+ del self._timestamps[key]
318
+ return None
319
+
320
+ return self._cache[key]
321
+
322
+ def save(self):
323
+ """Save cache to disk (stub)."""
324
+ pass
325
+
326
+ def load(self):
327
+ """Load cache from disk (stub)."""
328
+ pass
329
+
330
+
331
class PerformanceTracker:
    """Collects timing and memory metrics for adapter operations."""

    def __init__(self, monitor_config: dict[str, Any]):
        self.enabled = monitor_config.get("enabled", True)
        self.track_performance = monitor_config.get("track_performance", True)
        self.track_memory = monitor_config.get("track_memory", True)
        self._metrics: dict[str, Any] = {}

    @contextlib.contextmanager
    def time_operation(self, operation_name: str):
        """Time the enclosed block and fold the result into the metrics.

        Uses the monotonic ``time.perf_counter`` clock (instead of the
        wall clock) so recorded durations cannot go negative or jump if
        the system clock is adjusted mid-operation.
        """
        start = time.perf_counter()
        try:
            yield
        finally:
            duration = time.perf_counter() - start
            entry = self._metrics.setdefault(
                operation_name,
                {"count": 0, "total_duration": 0.0, "durations": []},
            )
            entry["count"] += 1
            entry["total_duration"] += duration
            entry["durations"].append(duration)
            # Derived statistics, refreshed on every observation.
            entry["duration"] = duration
            entry["average_duration"] = entry["total_duration"] / entry["count"]
            entry["min_duration"] = min(entry["durations"])
            entry["max_duration"] = max(entry["durations"])

    def record_memory_usage(self, label: str):
        """Snapshot current memory usage under *label*."""
        self._metrics.setdefault("memory_usage", {})[label] = _collect_memory_usage()

    def get_metrics(self) -> dict[str, Any]:
        """Return the raw metrics dictionary."""
        return self._metrics

    def export_metrics(self, path: Path):
        """Write the metrics to *path* as pretty-printed JSON."""
        with open(path, "w") as f:
            json.dump(self._metrics, f, indent=2)
384
+
385
+
386
class AdapterManager:
    """Registry and lifecycle coordinator for named adapters."""

    def __init__(self):
        # name -> adapter instance
        self.adapters = {}

    def register(self, name: str, adapter: BaseAdapter):
        """Register *adapter* under *name* (overwrites any previous entry)."""
        self.adapters[name] = adapter

    def get(self, name: str) -> BaseAdapter | None:
        """Look up a registered adapter, or None when unknown."""
        return self.adapters.get(name)

    def list_adapters(self) -> list[str]:
        """Return the names of every registered adapter."""
        return list(self.adapters)

    def initialize_adapter(self, name: str, model_id: str):
        """Load *model_id* into the named adapter and mark it LOADED."""
        adapter = self.adapters.get(name)
        if adapter is None:
            return
        adapter.load_model(model_id)
        adapter.state = AdapterState.LOADED

    def cleanup_adapter(self, name: str):
        """Release resources held by the named adapter, if registered."""
        adapter = self.adapters.get(name)
        if adapter is not None:
            adapter.cleanup()

    def initialize_all(self, model_id: str):
        """Initialize every registered adapter with the same model."""
        for name in self.adapters:
            self.initialize_adapter(name, model_id)

    def cleanup_all(self):
        """Cleanup every registered adapter."""
        for name in self.adapters:
            self.cleanup_adapter(name)

    def check_adapter_health(self, name: str) -> dict[str, Any]:
        """Health record for one adapter ("not_found" when unregistered)."""
        adapter = self.adapters.get(name)
        if adapter is None:
            return {"status": "not_found"}
        return {"status": "healthy", "state": adapter.state.value}

    def check_overall_health(self) -> dict[str, Any]:
        """Aggregate the per-adapter health reports."""
        return {
            "adapters": {
                name: self.check_adapter_health(name) for name in self.adapters
            }
        }
440
+
441
+
442
# Backwards-compatible alias: older callers import AdapterMonitor.
AdapterMonitor = PerformanceTracker
444
+
445
+
446
class AdapterUtils:
    """Stateless helper functions for adapter configuration and sizing."""

    @staticmethod
    def validate_config(config: dict[str, Any]) -> dict[str, Any]:
        """Check that the required config keys are present and non-empty.

        Returns:
            {"valid": bool, "errors": list[str]}.
        """
        errors = []
        if not config.get("name"):
            errors.append("name is required")
        if not config.get("adapter_type"):
            errors.append("adapter_type is required")
        return {"valid": not errors, "errors": errors}

    @staticmethod
    def infer_adapter_type(model_id: str) -> str:
        """Guess the adapter backend from a model identifier."""
        lowered = model_id.lower()
        if "gpt" in lowered:
            return "huggingface"
        if "davinci" in lowered:
            return "openai"
        return "generic"

    @staticmethod
    def select_optimal_device() -> str:
        """Prefer the first CUDA device when available, else CPU."""
        return "cuda:0" if torch.cuda.is_available() else "cpu"

    @staticmethod
    def estimate_memory_usage(model_params: dict[str, Any]) -> float:
        """Estimate model memory footprint in MB (weights + 20% overhead).

        Assumes 4 bytes/param for float32 and 2 bytes/param otherwise.
        """
        num_params = model_params.get("num_parameters", 0)
        precision = model_params.get("precision", "float32")
        bytes_per_param = 4 if precision == "float32" else 2
        base_memory = (num_params * bytes_per_param) / (1024**2)  # MB
        return float(base_memory * 1.2)

    @staticmethod
    def _parse_version(version: str) -> tuple[int, ...]:
        """Best-effort parse of a dotted version string into int components."""
        components: list[int] = []
        for token in version.split("."):
            digits = "".join(ch for ch in token if ch.isdigit())
            components.append(int(digits) if digits else 0)
        return tuple(components)

    @staticmethod
    def check_compatibility(
        requirements: dict[str, str], system_info: dict[str, str]
    ) -> dict[str, Any]:
        """Check *system_info* against *requirements*.

        BUGFIX: versions are now compared numerically component by
        component; the previous lexicographic string comparison wrongly
        treated "3.10" as older than "3.8".
        """
        issues = []
        for requirement in requirements:
            if requirement not in system_info:
                continue
            system_version = system_info[requirement]
            if "python" in requirement and AdapterUtils._parse_version(
                system_version
            ) < (3, 8):
                issues.append(f"Python version {system_version} < 3.8")
        return {"compatible": not issues, "issues": issues}

    @staticmethod
    def migrate_config(
        old_config: dict[str, Any], target_version: str
    ) -> dict[str, Any]:
        """Migrate a legacy config dict to *target_version*'s schema."""
        new_config = old_config.copy()
        new_config["version"] = target_version
        # Legacy key renames.
        if "model_path" in old_config:
            new_config["model_id"] = new_config.pop("model_path")
        if "device_id" in old_config:
            new_config["device"] = {
                "type": "cuda",
                "index": new_config.pop("device_id"),
            }
        return new_config
@@ -0,0 +1,85 @@
1
+ """
2
+ InvarLock Adapters Base Types
3
+ =============================
4
+
5
+ Type definitions for the InvarLock adapter system.
6
+ """
7
+
8
+ from dataclasses import dataclass
9
+ from enum import Enum
10
+ from typing import Any
11
+
12
+
13
class AdapterType(Enum):
    """Kinds of adapter backends known to InvarLock."""

    TRANSFORMER = "transformer"
    GENERIC = "generic"
    HUGGINGFACE = "huggingface"
    OPENAI = "openai"
20
+
21
+
22
class DeviceType(Enum):
    """Compute devices an adapter may target."""

    CPU = "cpu"
    CUDA = "cuda"
    AUTO = "auto"
28
+
29
+
30
class AdapterState(Enum):
    """Lifecycle states an adapter moves through."""

    INITIALIZED = "initialized"
    LOADED = "loaded"
    ERROR = "error"
    READY = "ready"
37
+
38
+
39
@dataclass
class PerformanceMetrics:
    """Lightweight record of adapter performance counters.

    Supports attribute access plus a read-only, dict-like protocol
    (``metrics["key"]`` and ``"key" in metrics``).
    """

    operation_count: int = 0
    total_duration: float = 0.0
    average_duration: float = 0.0
    memory_usage_mb: float = 0.0

    def __getitem__(self, key: str) -> Any:
        """Dict-style lookup; unknown keys yield an empty dict."""
        return getattr(self, key, {})

    def __contains__(self, key: str) -> bool:
        """Dict-style membership via attribute presence."""
        return hasattr(self, key)
55
+
56
+
57
+ @dataclass
58
+ class CacheConfig:
59
+ """Cache configuration."""
60
+
61
+ enabled: bool = True
62
+ max_size_mb: int = 1024
63
+ ttl_seconds: int = 3600
64
+ cache_dir: str | None = None
65
+
66
+
67
@dataclass
class MonitorConfig:
    """Settings controlling performance/memory monitoring."""

    enabled: bool = True
    track_performance: bool = True
    track_memory: bool = True
    log_level: str = "INFO"
75
+
76
+
77
# Public re-export surface of this module.
__all__ = [
    "AdapterType",
    "DeviceType",
    "AdapterState",
    "PerformanceMetrics",
    "CacheConfig",
    "MonitorConfig",
]