hqde-0.1.0-py3-none-any.whl
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Potentially problematic release.
This version of hqde might be problematic.
- hqde/__init__.py +62 -0
- hqde/__main__.py +0 -0
- hqde/core/__init__.py +23 -0
- hqde/core/hqde_system.py +380 -0
- hqde/distributed/__init__.py +18 -0
- hqde/distributed/fault_tolerance.py +346 -0
- hqde/distributed/hierarchical_aggregator.py +399 -0
- hqde/distributed/load_balancer.py +498 -0
- hqde/distributed/mapreduce_ensemble.py +394 -0
- hqde/py.typed +0 -0
- hqde/quantum/__init__.py +17 -0
- hqde/quantum/quantum_aggregator.py +291 -0
- hqde/quantum/quantum_noise.py +284 -0
- hqde/quantum/quantum_optimization.py +336 -0
- hqde/utils/__init__.py +20 -0
- hqde/utils/config_manager.py +9 -0
- hqde/utils/data_utils.py +13 -0
- hqde/utils/performance_monitor.py +465 -0
- hqde/utils/visualization.py +9 -0
- hqde-0.1.0.dist-info/METADATA +237 -0
- hqde-0.1.0.dist-info/RECORD +24 -0
- hqde-0.1.0.dist-info/WHEEL +5 -0
- hqde-0.1.0.dist-info/licenses/LICENSE +21 -0
- hqde-0.1.0.dist-info/top_level.txt +1 -0
hqde/__init__.py
ADDED
@@ -0,0 +1,62 @@
"""
HQDE: Hierarchical Quantum-Distributed Ensemble Learning Framework

A comprehensive framework for distributed ensemble learning with quantum-inspired
algorithms, adaptive quantization, and efficient hierarchical aggregation.
"""

__version__ = "0.1.0"
__author__ = "HQDE Team"

# Core components
from .core.hqde_system import HQDESystem, create_hqde_system
from .core.hqde_system import (
    AdaptiveQuantizer,
    QuantumInspiredAggregator,
    DistributedEnsembleManager
)

# Quantum-inspired components
from .quantum import (
    QuantumEnsembleAggregator,
    QuantumNoiseGenerator,
    QuantumEnsembleOptimizer
)

# Distributed components
from .distributed import (
    MapReduceEnsembleManager,
    HierarchicalAggregator,
    ByzantineFaultTolerantAggregator,
    DynamicLoadBalancer
)

# Utilities
from .utils import (
    PerformanceMonitor,
    SystemMetrics
)

__all__ = [
    # Core
    'HQDESystem',
    'create_hqde_system',
    'AdaptiveQuantizer',
    'QuantumInspiredAggregator',
    'DistributedEnsembleManager',

    # Quantum
    'QuantumEnsembleAggregator',
    'QuantumNoiseGenerator',
    'QuantumEnsembleOptimizer',

    # Distributed
    'MapReduceEnsembleManager',
    'HierarchicalAggregator',
    'ByzantineFaultTolerantAggregator',
    'DynamicLoadBalancer',

    # Utils
    'PerformanceMonitor',
    'SystemMetrics'
]
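The re-exports above define the package's public surface. A minimal sketch of importing it (illustrative only, not part of the wheel; assumes the package and its dependencies such as torch, ray, numpy, and psutil are installed):

```python
# Hypothetical smoke test of the top-level API re-exported by hqde/__init__.py.
import hqde

print(hqde.__version__)           # "0.1.0"
print(sorted(hqde.__all__)[:3])   # first few exported names
```
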
hqde/__main__.py
ADDED
File without changes
hqde/core/__init__.py
ADDED
@@ -0,0 +1,23 @@
"""
Core HQDE system components.

This module contains the main HQDE system implementation including
adaptive quantization, quantum-inspired aggregation, and distributed
ensemble management.
"""

from .hqde_system import (
    HQDESystem,
    AdaptiveQuantizer,
    QuantumInspiredAggregator,
    DistributedEnsembleManager,
    create_hqde_system
)

__all__ = [
    'HQDESystem',
    'AdaptiveQuantizer',
    'QuantumInspiredAggregator',
    'DistributedEnsembleManager',
    'create_hqde_system'
]
hqde/core/hqde_system.py
ADDED
@@ -0,0 +1,380 @@
"""
HQDE (Hierarchical Quantum-Distributed Ensemble Learning) Core System

This module implements the main HQDE framework with quantum-inspired algorithms,
distributed ensemble learning, and adaptive quantization.
"""

import torch
import torch.nn as nn
import numpy as np
import ray
from typing import Dict, List, Optional, Tuple, Any
from collections import defaultdict
import logging
import time
import psutil
from concurrent.futures import ThreadPoolExecutor

class AdaptiveQuantizer:
    """Adaptive weight quantization based on real-time importance scoring."""

    def __init__(self, base_bits: int = 8, min_bits: int = 4, max_bits: int = 16):
        self.base_bits = base_bits
        self.min_bits = min_bits
        self.max_bits = max_bits
        self.compression_cache = {}

    def compute_importance_score(self, weights: torch.Tensor, gradients: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Compute importance scores based on gradient magnitude and weight variance."""
        with torch.no_grad():
            # Weight-based importance
            weight_importance = torch.abs(weights)

            # Gradient-based importance if available
            if gradients is not None:
                grad_importance = torch.abs(gradients)
                combined_importance = 0.7 * weight_importance + 0.3 * grad_importance
            else:
                combined_importance = weight_importance

            # Normalize to [0, 1]
            if combined_importance.numel() > 0:
                min_val = combined_importance.min()
                max_val = combined_importance.max()
                if max_val > min_val:
                    importance = (combined_importance - min_val) / (max_val - min_val)
                else:
                    importance = torch.ones_like(combined_importance) * 0.5
            else:
                importance = torch.ones_like(combined_importance) * 0.5

        return importance

    def adaptive_quantize(self, weights: torch.Tensor, importance_score: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, Any]]:
        """Perform adaptive quantization based on importance scores."""
        # Determine bits per parameter based on importance
        bits_per_param = self.min_bits + (self.max_bits - self.min_bits) * importance_score
        bits_per_param = torch.clamp(bits_per_param, self.min_bits, self.max_bits).int()

        # For simplicity, use uniform quantization with average bits
        avg_bits = int(bits_per_param.float().mean().item())

        # Quantize weights
        weight_min = weights.min()
        weight_max = weights.max()

        if weight_max > weight_min:
            scale = (weight_max - weight_min) / (2**avg_bits - 1)
            zero_point = weight_min

            quantized = torch.round((weights - zero_point) / scale)
            quantized = torch.clamp(quantized, 0, 2**avg_bits - 1)

            # Dequantize for use
            dequantized = quantized * scale + zero_point
        else:
            dequantized = weights.clone()
            scale = torch.tensor(1.0)
            zero_point = torch.tensor(0.0)

        metadata = {
            'scale': scale,
            'zero_point': zero_point,
            'avg_bits': avg_bits,
            'compression_ratio': 32.0 / avg_bits  # Assuming original is float32
        }

        return dequantized, metadata

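AdaptiveQuantizer above maps an importance score in [0, 1] to a bit-width between min_bits and max_bits, then applies uniform quantization at the average bit-width: scale = (max - min) / (2^b - 1), q = round((w - min) / scale). A minimal sketch of exercising it on a random tensor (illustrative only, not part of the packaged file; assumes torch is available):

```python
import torch

from hqde.core.hqde_system import AdaptiveQuantizer

quantizer = AdaptiveQuantizer(base_bits=8, min_bits=4, max_bits=16)
weights = torch.randn(256, 128)  # stand-in for one layer's weight tensor

# Importance defaults to normalized |w| when no gradients are supplied.
importance = quantizer.compute_importance_score(weights)
dequantized, meta = quantizer.adaptive_quantize(weights, importance)

print(meta['avg_bits'])           # average bit-width actually used
print(meta['compression_ratio'])  # 32.0 / avg_bits for float32 inputs
print(torch.max(torch.abs(weights - dequantized)))  # round-off error, roughly scale / 2
```
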
class QuantumInspiredAggregator:
    """Quantum-inspired ensemble aggregation with controlled noise injection."""

    def __init__(self, noise_scale: float = 0.01, exploration_factor: float = 0.1):
        self.noise_scale = noise_scale
        self.exploration_factor = exploration_factor

    def quantum_noise_injection(self, weights: torch.Tensor) -> torch.Tensor:
        """Add quantum-inspired noise for exploration."""
        noise = torch.randn_like(weights) * self.noise_scale
        return weights + noise

    def efficiency_weighted_aggregation(self, weight_list: List[torch.Tensor],
                                        efficiency_scores: List[float]) -> torch.Tensor:
        """Aggregate weights using efficiency-based weighting."""
        if not weight_list or not efficiency_scores:
            raise ValueError("Empty weight list or efficiency scores")

        # Normalize efficiency scores
        efficiency_tensor = torch.tensor(efficiency_scores, dtype=torch.float32)
        efficiency_weights = torch.softmax(efficiency_tensor, dim=0)

        # Weighted aggregation
        aggregated = torch.zeros_like(weight_list[0])
        for weight, eff_weight in zip(weight_list, efficiency_weights):
            aggregated += eff_weight * weight

        # Add quantum noise for exploration
        aggregated = self.quantum_noise_injection(aggregated)

        return aggregated

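QuantumInspiredAggregator softmax-normalizes the per-member efficiency scores, forms the weighted average of the member tensors, and perturbs the result with Gaussian noise of standard deviation noise_scale. A small sketch (illustrative only, not part of the packaged file):

```python
import torch

from hqde.core.hqde_system import QuantumInspiredAggregator

aggregator = QuantumInspiredAggregator(noise_scale=0.01, exploration_factor=0.1)

# One tensor per ensemble member, plus a scalar efficiency score for each.
members = [torch.randn(64, 32) for _ in range(4)]
efficiency = [0.9, 1.2, 0.8, 1.0]  # higher score -> larger softmax weight

merged = aggregator.efficiency_weighted_aggregation(members, efficiency)
assert merged.shape == members[0].shape
```
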
class DistributedEnsembleManager:
    """Manages distributed ensemble learning with Ray."""

    def __init__(self, num_workers: int = 4):
        self.num_workers = num_workers
        self.workers = []
        self.quantizer = AdaptiveQuantizer()
        self.aggregator = QuantumInspiredAggregator()

        # Initialize Ray if not already initialized
        if not ray.is_initialized():
            ray.init(ignore_reinit_error=True)

    def create_ensemble_workers(self, model_class, model_kwargs: Dict[str, Any]):
        """Create distributed ensemble workers."""
        @ray.remote
        class EnsembleWorker:
            def __init__(self, model_class, model_kwargs):
                self.model = model_class(**model_kwargs)
                self.efficiency_score = 1.0
                self.quantizer = AdaptiveQuantizer()

            def train_step(self, data_batch):
                # Simulate training step
                loss = torch.randn(1).item()
                self.efficiency_score = max(0.1, self.efficiency_score * 0.99 + 0.01 * (1.0 / (1.0 + loss)))
                return loss

            def get_weights(self):
                return {name: param.data.clone() for name, param in self.model.named_parameters()}

            def set_weights(self, weights_dict):
                for name, param in self.model.named_parameters():
                    if name in weights_dict:
                        param.data.copy_(weights_dict[name])

            def get_efficiency_score(self):
                return self.efficiency_score

        self.workers = [EnsembleWorker.remote(model_class, model_kwargs)
                        for _ in range(self.num_workers)]

    def aggregate_weights(self) -> Dict[str, torch.Tensor]:
        """Aggregate weights from all workers."""
        # Get weights and efficiency scores from workers
        weight_futures = [worker.get_weights.remote() for worker in self.workers]
        efficiency_futures = [worker.get_efficiency_score.remote() for worker in self.workers]

        all_weights = ray.get(weight_futures)
        efficiency_scores = ray.get(efficiency_futures)

        if not all_weights:
            return {}

        # Aggregate each parameter separately
        aggregated_weights = {}
        param_names = all_weights[0].keys()

        for param_name in param_names:
            # Collect parameter tensors from all workers
            param_tensors = [weights[param_name] for weights in all_weights]

            # Compute importance scores for quantization
            stacked_params = torch.stack(param_tensors)
            importance_scores = self.quantizer.compute_importance_score(stacked_params)

            # Quantize and aggregate
            quantized_params = []
            for i, param in enumerate(param_tensors):
                quantized, metadata = self.quantizer.adaptive_quantize(
                    param, importance_scores[i]
                )
                quantized_params.append(quantized)

            # Efficiency-weighted aggregation
            aggregated_param = self.aggregator.efficiency_weighted_aggregation(
                quantized_params, efficiency_scores
            )

            aggregated_weights[param_name] = aggregated_param

        return aggregated_weights

    def broadcast_weights(self, weights: Dict[str, torch.Tensor]):
        """Broadcast aggregated weights to all workers."""
        futures = [worker.set_weights.remote(weights) for worker in self.workers]
        ray.get(futures)

    def train_ensemble(self, data_loader, num_epochs: int = 10):
        """Train the ensemble using distributed workers."""
        for epoch in range(num_epochs):
            # Simulate training on each worker
            training_futures = []
            for worker in self.workers:
                # In a real implementation, you'd distribute different data batches
                training_futures.append(worker.train_step.remote(None))

            # Wait for training to complete
            losses = ray.get(training_futures)

            # Aggregate weights
            aggregated_weights = self.aggregate_weights()

            # Broadcast aggregated weights
            if aggregated_weights:
                self.broadcast_weights(aggregated_weights)

            print(f"Epoch {epoch + 1}/{num_epochs}, Average Loss: {np.mean(losses):.4f}")

    def shutdown(self):
        """Shutdown the distributed ensemble manager."""
        ray.shutdown()

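DistributedEnsembleManager wires the two components above into a Ray-based loop: each remote EnsembleWorker reports a simulated loss, aggregate_weights() quantizes and merges every parameter across workers, and broadcast_weights() pushes the merged state back. A rough sketch of driving it directly (illustrative only, not part of the packaged file; note that in this release train_step ignores the data batch, so the loop runs without a real data loader):

```python
import torch.nn as nn

from hqde.core.hqde_system import DistributedEnsembleManager

manager = DistributedEnsembleManager(num_workers=2)  # starts a local Ray instance if needed
manager.create_ensemble_workers(nn.Linear, {'in_features': 16, 'out_features': 4})

manager.train_ensemble(data_loader=None, num_epochs=2)  # prints per-epoch average loss
merged = manager.aggregate_weights()   # dict: parameter name -> aggregated tensor
manager.broadcast_weights(merged)
manager.shutdown()
```
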
class HQDESystem:
    """Main HQDE (Hierarchical Quantum-Distributed Ensemble Learning) System."""

    def __init__(self,
                 model_class,
                 model_kwargs: Dict[str, Any],
                 num_workers: int = 4,
                 quantization_config: Optional[Dict[str, Any]] = None,
                 aggregation_config: Optional[Dict[str, Any]] = None):
        """
        Initialize HQDE System.

        Args:
            model_class: The model class to use for ensemble members
            model_kwargs: Keyword arguments for model initialization
            num_workers: Number of distributed workers
            quantization_config: Configuration for adaptive quantization
            aggregation_config: Configuration for quantum-inspired aggregation
        """
        self.model_class = model_class
        self.model_kwargs = model_kwargs
        self.num_workers = num_workers

        # Initialize components
        self.quantizer = AdaptiveQuantizer(**(quantization_config or {}))
        self.aggregator = QuantumInspiredAggregator(**(aggregation_config or {}))
        self.ensemble_manager = DistributedEnsembleManager(num_workers)

        # Performance monitoring
        self.metrics = {
            'training_time': 0.0,
            'communication_overhead': 0.0,
            'memory_usage': 0.0,
            'compression_ratio': 1.0
        }

        self.logger = logging.getLogger(__name__)

    def initialize_ensemble(self):
        """Initialize the distributed ensemble."""
        self.logger.info(f"Initializing HQDE ensemble with {self.num_workers} workers")
        self.ensemble_manager.create_ensemble_workers(self.model_class, self.model_kwargs)

    def train(self, data_loader, num_epochs: int = 10, validation_loader=None):
        """Train the HQDE ensemble."""
        start_time = time.time()

        # Monitor initial memory usage
        initial_memory = psutil.Process().memory_info().rss / 1024 / 1024  # MB

        self.logger.info(f"Starting HQDE training for {num_epochs} epochs")

        # Train the ensemble
        self.ensemble_manager.train_ensemble(data_loader, num_epochs)

        # Calculate metrics
        end_time = time.time()
        final_memory = psutil.Process().memory_info().rss / 1024 / 1024  # MB

        self.metrics.update({
            'training_time': end_time - start_time,
            'memory_usage': final_memory - initial_memory
        })

        self.logger.info(f"HQDE training completed in {self.metrics['training_time']:.2f} seconds")
        self.logger.info(f"Memory usage: {self.metrics['memory_usage']:.2f} MB")

        return self.metrics

    def predict(self, data_loader):
        """Make predictions using the trained ensemble."""
        # This is a simplified prediction method
        # In a real implementation, you'd aggregate predictions from all workers
        predictions = []

        # Get weights from first worker as representative
        if self.ensemble_manager.workers:
            weights = ray.get(self.ensemble_manager.workers[0].get_weights.remote())
            # Simulate predictions using these weights
            for batch in data_loader:
                # In practice, you'd run the model forward pass
                batch_predictions = torch.randn(len(batch), 10)  # Simulated predictions
                predictions.append(batch_predictions)

        return torch.cat(predictions, dim=0) if predictions else torch.empty(0)

    def get_performance_metrics(self) -> Dict[str, float]:
        """Get performance metrics from the HQDE system."""
        return self.metrics.copy()

    def save_model(self, filepath: str):
        """Save the trained ensemble model."""
        # Get aggregated weights
        aggregated_weights = self.ensemble_manager.aggregate_weights()

        model_state = {
            'aggregated_weights': aggregated_weights,
            'model_kwargs': self.model_kwargs,
            'metrics': self.metrics,
            'num_workers': self.num_workers
        }

        torch.save(model_state, filepath)
        self.logger.info(f"HQDE model saved to {filepath}")

    def load_model(self, filepath: str):
        """Load a trained ensemble model."""
        model_state = torch.load(filepath)

        self.model_kwargs = model_state['model_kwargs']
        self.metrics = model_state['metrics']
        self.num_workers = model_state['num_workers']

        # Reinitialize ensemble with loaded state
        self.initialize_ensemble()

        # Set weights if available
        if 'aggregated_weights' in model_state:
            self.ensemble_manager.broadcast_weights(model_state['aggregated_weights'])

        self.logger.info(f"HQDE model loaded from {filepath}")

    def cleanup(self):
        """Cleanup resources."""
        self.ensemble_manager.shutdown()

# Factory function for easy instantiation
def create_hqde_system(model_class,
                       model_kwargs: Dict[str, Any],
                       num_workers: int = 4,
                       **kwargs) -> HQDESystem:
    """
    Factory function to create and initialize an HQDE system.

    Args:
        model_class: The model class for ensemble members
        model_kwargs: Model initialization parameters
        num_workers: Number of distributed workers
        **kwargs: Additional configuration parameters

    Returns:
        Initialized HQDESystem instance
    """
    system = HQDESystem(model_class, model_kwargs, num_workers, **kwargs)
    system.initialize_ensemble()
    return system
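End to end, create_hqde_system builds the HQDESystem, spins up the workers, and returns the initialized system. A hypothetical usage sketch (not part of the wheel; the model class, dimensions, and checkpoint path below are made up for illustration):

```python
import torch.nn as nn

from hqde import create_hqde_system

class SmallNet(nn.Module):
    """Toy ensemble member used only for this example."""
    def __init__(self, in_dim: int = 16, out_dim: int = 4):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(in_dim, 32), nn.ReLU(), nn.Linear(32, out_dim))

    def forward(self, x):
        return self.net(x)

system = create_hqde_system(SmallNet, {'in_dim': 16, 'out_dim': 4}, num_workers=2)
metrics = system.train(data_loader=None, num_epochs=2)  # the training loop is simulated in 0.1.0
print(metrics['training_time'], metrics['memory_usage'])

system.save_model('hqde_checkpoint.pt')  # hypothetical path
system.cleanup()
```
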
hqde/distributed/__init__.py
ADDED
@@ -0,0 +1,18 @@
"""
Distributed computing components for HQDE framework.

This module provides distributed ensemble management, hierarchical aggregation,
and MapReduce-inspired weight management for scalable ensemble learning.
"""

from .mapreduce_ensemble import MapReduceEnsembleManager
from .hierarchical_aggregator import HierarchicalAggregator
from .fault_tolerance import ByzantineFaultTolerantAggregator
from .load_balancer import DynamicLoadBalancer

__all__ = [
    'MapReduceEnsembleManager',
    'HierarchicalAggregator',
    'ByzantineFaultTolerantAggregator',
    'DynamicLoadBalancer'
]