hqde 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of hqde might be problematic. Click here for more details.
- hqde/__init__.py +62 -0
- hqde/__main__.py +0 -0
- hqde/core/__init__.py +23 -0
- hqde/core/hqde_system.py +380 -0
- hqde/distributed/__init__.py +18 -0
- hqde/distributed/fault_tolerance.py +346 -0
- hqde/distributed/hierarchical_aggregator.py +399 -0
- hqde/distributed/load_balancer.py +498 -0
- hqde/distributed/mapreduce_ensemble.py +394 -0
- hqde/py.typed +0 -0
- hqde/quantum/__init__.py +17 -0
- hqde/quantum/quantum_aggregator.py +291 -0
- hqde/quantum/quantum_noise.py +284 -0
- hqde/quantum/quantum_optimization.py +336 -0
- hqde/utils/__init__.py +20 -0
- hqde/utils/config_manager.py +9 -0
- hqde/utils/data_utils.py +13 -0
- hqde/utils/performance_monitor.py +465 -0
- hqde/utils/visualization.py +9 -0
- hqde-0.1.0.dist-info/METADATA +237 -0
- hqde-0.1.0.dist-info/RECORD +24 -0
- hqde-0.1.0.dist-info/WHEEL +5 -0
- hqde-0.1.0.dist-info/licenses/LICENSE +21 -0
- hqde-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Quantum-inspired ensemble aggregation module.
|
|
3
|
+
|
|
4
|
+
This module implements quantum-inspired algorithms for aggregating ensemble
|
|
5
|
+
predictions and weights, including entanglement simulation and quantum
|
|
6
|
+
superposition for model combination.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import torch
|
|
10
|
+
import torch.nn as nn
|
|
11
|
+
import numpy as np
|
|
12
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
13
|
+
import math
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class EntangledEnsembleManager:
    """Manages quantum entanglement-inspired ensemble correlations.

    Holds a fixed, symmetric "entanglement" matrix coupling ensemble
    members, and derives aggregation weights from the cosine similarity
    between member states passed through that coupling.
    """

    def __init__(self, num_ensembles: int, entanglement_strength: float = 0.1):
        """
        Args:
            num_ensembles: Number of ensemble members to correlate.
            entanglement_strength: Scale of the off-diagonal couplings.
        """
        self.num_ensembles = num_ensembles
        self.entanglement_strength = entanglement_strength
        self.entanglement_matrix = self._initialize_entanglement()

    def _initialize_entanglement(self) -> torch.Tensor:
        """Initialize a symmetric entanglement matrix with unit diagonal."""
        # Random couplings, symmetrized so member i couples to j as j to i.
        matrix = torch.randn(self.num_ensembles, self.num_ensembles)
        matrix = (matrix + matrix.T) / 2  # Make symmetric

        # Scale couplings by the entanglement strength.
        matrix = matrix * self.entanglement_strength

        # Diagonal is 1 (full self-entanglement), overriding the scaling.
        matrix.fill_diagonal_(1.0)

        return matrix

    def compute_entanglement_weights(self, ensemble_states: List[torch.Tensor]) -> torch.Tensor:
        """Compute entanglement-based weights for ensemble aggregation.

        Args:
            ensemble_states: One state tensor per ensemble member.

        Returns:
            A softmax-normalized weight vector of length ``num_ensembles``.

        Raises:
            ValueError: If the number of states does not match ``num_ensembles``.
        """
        if len(ensemble_states) != self.num_ensembles:
            raise ValueError(f"Expected {self.num_ensembles} ensemble states, got {len(ensemble_states)}")

        # Pairwise cosine similarities between member states.
        similarities = torch.zeros(self.num_ensembles, self.num_ensembles)

        # Only the upper triangle (including the diagonal) needs computing;
        # the matrix is filled symmetrically. The original iterated the full
        # j range and skipped i > j — starting j at i drops the redundant
        # half of the loop.
        for i, state_i in enumerate(ensemble_states):
            for j in range(i, self.num_ensembles):
                similarity = torch.cosine_similarity(
                    state_i.flatten(), ensemble_states[j].flatten(), dim=0
                )
                similarities[i, j] = similarities[j, i] = similarity

        # Couple similarities through the entanglement matrix and normalize
        # each member's diagonal response into a weight.
        entangled_weights = torch.softmax(
            torch.diagonal(similarities @ self.entanglement_matrix), dim=0
        )

        return entangled_weights

    def apply_entanglement(self, ensemble_predictions: List[torch.Tensor],
                          entanglement_weights: torch.Tensor) -> torch.Tensor:
        """Combine predictions into an entanglement-weighted superposition.

        Args:
            ensemble_predictions: One prediction tensor per member.
            entanglement_weights: Per-member scalar weights.

        Returns:
            The weighted sum of the member predictions.
        """
        weighted_predictions = [
            pred * weight
            for pred, weight in zip(ensemble_predictions, entanglement_weights)
        ]

        # Quantum superposition-like combination (plain weighted sum).
        return torch.stack(weighted_predictions, dim=0).sum(dim=0)

    def quantum_measurement(self, superposition_state: torch.Tensor) -> torch.Tensor:
        """Simulate quantum measurement collapse to a final prediction.

        Adds small Gaussian "measurement" noise (fixed scale 0.01) to the
        superposition state.
        """
        measurement_noise = torch.randn_like(superposition_state) * 0.01
        return superposition_state + measurement_noise
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
class QuantumEnsembleAggregator:
    """Main quantum-inspired ensemble aggregation system.

    Provides three aggregation strategies — superposition, entanglement,
    and voting — plus an adaptive mode that selects one based on the
    ensemble's average uncertainty.
    """

    def __init__(self,
                 num_ensembles: int,
                 entanglement_strength: float = 0.1,
                 quantum_noise_scale: float = 0.01,
                 use_quantum_annealing: bool = False):
        """
        Initialize quantum ensemble aggregator.

        Args:
            num_ensembles: Number of ensemble members
            entanglement_strength: Strength of quantum entanglement simulation
            quantum_noise_scale: Scale of quantum noise injection
            use_quantum_annealing: Whether to use quantum annealing for
                optimization (stored; not referenced elsewhere in this class)
        """
        self.num_ensembles = num_ensembles
        self.quantum_noise_scale = quantum_noise_scale
        self.use_quantum_annealing = use_quantum_annealing

        # Entanglement simulation backing entanglement_based_aggregation().
        self.entanglement_manager = EntangledEnsembleManager(
            num_ensembles, entanglement_strength
        )

    def quantum_superposition_aggregation(self,
                                         ensemble_predictions: List[torch.Tensor],
                                         confidence_scores: Optional[List[float]] = None) -> torch.Tensor:
        """
        Aggregate ensemble predictions using quantum superposition.

        Confidence scores are softmax-normalized and square-rooted so they
        act as quantum amplitudes (squared amplitudes sum to 1); the
        amplitude-weighted sum is perturbed with Gaussian noise scaled by
        ``quantum_noise_scale``.

        Args:
            ensemble_predictions: List of prediction tensors from ensemble members
            confidence_scores: Optional confidence scores for each ensemble member

        Returns:
            Aggregated prediction tensor

        Raises:
            ValueError: If the number of predictions does not match
                ``num_ensembles``.
        """
        if len(ensemble_predictions) != self.num_ensembles:
            raise ValueError(f"Expected {self.num_ensembles} predictions, got {len(ensemble_predictions)}")

        # Equal weighting when no confidence information is supplied.
        if confidence_scores is None:
            confidence_scores = [1.0] * self.num_ensembles

        # Normalize confidence scores into quantum amplitudes.
        confidence_tensor = torch.tensor(confidence_scores, dtype=torch.float32)
        amplitudes = torch.sqrt(torch.softmax(confidence_tensor, dim=0))

        # Amplitude-weighted superposition of the predictions.
        superposition = torch.zeros_like(ensemble_predictions[0])
        for pred, amplitude in zip(ensemble_predictions, amplitudes):
            superposition += amplitude * pred

        # Quantum noise for exploration.
        quantum_noise = torch.randn_like(superposition) * self.quantum_noise_scale
        return superposition + quantum_noise

    def entanglement_based_aggregation(self,
                                      ensemble_predictions: List[torch.Tensor],
                                      ensemble_states: List[torch.Tensor]) -> torch.Tensor:
        """
        Aggregate predictions using quantum entanglement simulation.

        Args:
            ensemble_predictions: Prediction tensors from ensemble members
            ensemble_states: Internal state tensors from ensemble members

        Returns:
            Entanglement-weighted aggregated predictions
        """
        # Weights derived from pairwise state similarity.
        entanglement_weights = self.entanglement_manager.compute_entanglement_weights(ensemble_states)

        # Entangled superposition of the predictions.
        entangled_superposition = self.entanglement_manager.apply_entanglement(
            ensemble_predictions, entanglement_weights
        )

        # Simulated measurement collapse (adds small Gaussian noise).
        return self.entanglement_manager.quantum_measurement(entangled_superposition)

    def quantum_voting_aggregation(self,
                                  ensemble_predictions: List[torch.Tensor],
                                  voting_weights: Optional[List[float]] = None) -> torch.Tensor:
        """
        Quantum-inspired voting aggregation with coherent superposition.

        Args:
            ensemble_predictions: Prediction tensors from ensemble members
            voting_weights: Optional voting weights for each ensemble member

        Returns:
            Quantum-voted aggregated predictions
        """
        if voting_weights is None:
            voting_weights = [1.0] * len(ensemble_predictions)

        # Map predictions to wrapped phases in (-pi, pi], scaled by weight.
        quantum_phases = [
            torch.atan2(torch.sin(pred * math.pi), torch.cos(pred * math.pi)) * weight
            for pred, weight in zip(ensemble_predictions, voting_weights)
        ]

        # Combine phases by averaging (quantum-interference analogue).
        combined_phase = torch.stack(quantum_phases, dim=0).mean(dim=0)

        # The original built sin(phase) + 1j*cos(phase) and returned its
        # real part, which is exactly sin(phase); compute that directly
        # and stay in real arithmetic.
        return torch.sin(combined_phase)

    def adaptive_quantum_aggregation(self,
                                    ensemble_predictions: List[torch.Tensor],
                                    ensemble_uncertainties: List[torch.Tensor],
                                    aggregation_mode: str = "auto") -> Tuple[torch.Tensor, Dict[str, Any]]:
        """
        Adaptive quantum aggregation that chooses the best method based on uncertainty.

        Args:
            ensemble_predictions: Prediction tensors from ensemble members
            ensemble_uncertainties: Uncertainty estimates from ensemble members
            aggregation_mode: Aggregation mode ("auto", "superposition", "entanglement", "voting")

        Returns:
            Tuple of (aggregated_predictions, aggregation_metrics). The
            metrics dict mixes a string ("method_used") with floats, hence
            ``Dict[str, Any]`` — the original's ``Dict[str, float]``
            annotation was violated by its own "method_used" entry.

        Raises:
            ValueError: If an unknown aggregation method is requested.
        """
        avg_uncertainty = torch.stack(ensemble_uncertainties).mean().item()

        # Pick a strategy: higher uncertainty -> more exploratory methods.
        if aggregation_mode == "auto":
            if avg_uncertainty > 0.5:
                chosen_method = "entanglement"  # High uncertainty - use entanglement
            elif avg_uncertainty > 0.2:
                chosen_method = "superposition"  # Medium uncertainty - use superposition
            else:
                chosen_method = "voting"  # Low uncertainty - use voting
        else:
            chosen_method = aggregation_mode

        # Apply the chosen aggregation method.
        if chosen_method == "superposition":
            # Confidence shrinks as mean uncertainty grows.
            confidence_scores = [1.0 / (1.0 + u.mean().item()) for u in ensemble_uncertainties]
            aggregated = self.quantum_superposition_aggregation(ensemble_predictions, confidence_scores)
        elif chosen_method == "entanglement":
            # Uncertainties double as state representations.
            aggregated = self.entanglement_based_aggregation(ensemble_predictions, ensemble_uncertainties)
        elif chosen_method == "voting":
            voting_weights = [1.0 / (1.0 + u.mean().item()) for u in ensemble_uncertainties]
            aggregated = self.quantum_voting_aggregation(ensemble_predictions, voting_weights)
        else:
            raise ValueError(f"Unknown aggregation method: {chosen_method}")

        metrics = {
            "method_used": chosen_method,
            "average_uncertainty": avg_uncertainty,
            "ensemble_diversity": self._compute_ensemble_diversity(ensemble_predictions),
            "quantum_coherence": self._compute_quantum_coherence(ensemble_predictions)
        }

        return aggregated, metrics

    def _compute_ensemble_diversity(self, ensemble_predictions: List[torch.Tensor]) -> float:
        """Compute a pairwise decorrelation measure for the ensemble.

        A pair's diversity is 1 - |pearson correlation|; the result is the
        mean over all pairs, or 0.0 for fewer than two members.
        """
        if len(ensemble_predictions) < 2:
            return 0.0

        diversities = []
        for i in range(len(ensemble_predictions)):
            for j in range(i + 1, len(ensemble_predictions)):
                pred_i = ensemble_predictions[i].flatten()
                pred_j = ensemble_predictions[j].flatten()

                correlation = torch.corrcoef(torch.stack([pred_i, pred_j]))[0, 1]
                corr_value = correlation.item()
                # torch.corrcoef yields NaN for a zero-variance (constant)
                # prediction; treat that pair as uncorrelated instead of
                # letting NaN poison the mean.
                if math.isnan(corr_value):
                    corr_value = 0.0
                diversities.append(1.0 - abs(corr_value))

        # float() so the declared return type holds (np.mean is np.float64).
        return float(np.mean(diversities))

    def _compute_quantum_coherence(self, ensemble_predictions: List[torch.Tensor]) -> float:
        """Compute a coherence measure as phase alignment across members.

        Returns 1.0 (perfect coherence) for fewer than two members.
        """
        if len(ensemble_predictions) < 2:
            return 1.0

        # Interpret each real prediction as a complex number and take its
        # phase (0 for positive values, pi for negative).
        phases = []
        for pred in ensemble_predictions:
            phase = torch.angle(torch.complex(pred, torch.zeros_like(pred)))
            phases.append(phase.flatten())

        stacked_phases = torch.stack(phases)
        phase_variance = torch.var(stacked_phases, dim=0).mean()

        # Coherence is inversely related to phase variance.
        return torch.exp(-phase_variance).item()
|
|
@@ -0,0 +1,284 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Quantum noise generation module for HQDE framework.
|
|
3
|
+
|
|
4
|
+
This module provides various quantum-inspired noise generation techniques
|
|
5
|
+
for ensemble learning, including quantum differential privacy and
|
|
6
|
+
exploration-enhancing noise injection.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import torch
|
|
10
|
+
import numpy as np
|
|
11
|
+
from typing import Optional, Tuple, Dict, Any
|
|
12
|
+
import math
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class QuantumNoiseGenerator:
    """Generates quantum-inspired noise for ensemble learning enhancement.

    An internal time step models simulated "quantum" time: it decays
    coherence via ``decoherence_rate`` and is advanced once per call to
    :meth:`generate_adaptive_quantum_noise`.
    """

    def __init__(self,
                 noise_scale: float = 0.01,
                 quantum_coherence_time: float = 1.0,
                 decoherence_rate: float = 0.1):
        """
        Initialize quantum noise generator.

        Args:
            noise_scale: Base scale for quantum noise
            quantum_coherence_time: Simulated quantum coherence time
            decoherence_rate: Rate of quantum decoherence
        """
        self.noise_scale = noise_scale
        self.coherence_time = quantum_coherence_time
        self.decoherence_rate = decoherence_rate
        # Simulated time; advanced by generate_adaptive_quantum_noise().
        self.time_step = 0

    def generate_quantum_dp_noise(self,
                                 tensor_shape: torch.Size,
                                 epsilon: float = 1.0,
                                 delta: float = 1e-5) -> torch.Tensor:
        """
        Generate quantum differential privacy noise.

        Uses the Gaussian mechanism's sigma (assuming unit L2 sensitivity)
        modulated by a decaying coherent phase factor.

        Args:
            tensor_shape: Shape of the tensor to add noise to
            epsilon: Privacy parameter epsilon
            delta: Privacy parameter delta

        Returns:
            Quantum differential privacy noise tensor
        """
        # Gaussian-mechanism noise level; L2 sensitivity assumed to be 1.
        sensitivity = 1.0
        sigma = sensitivity * math.sqrt(2 * math.log(1.25 / delta)) / epsilon

        base_noise = torch.randn(tensor_shape) * sigma

        # Coherent oscillation frequency and exponential decoherence decay.
        coherent_freq = 2 * math.pi / self.coherence_time
        time_factor = math.exp(-self.decoherence_rate * self.time_step)

        # Random complex phase; only its real part modulates the noise.
        quantum_phase = torch.exp(1j * coherent_freq * self.time_step * torch.randn(tensor_shape))

        return base_noise * time_factor * quantum_phase.real

    def generate_exploration_noise(self,
                                  current_weights: torch.Tensor,
                                  exploration_strength: float = 0.1) -> torch.Tensor:
        """
        Generate exploration noise based on current weight distribution.

        Args:
            current_weights: Current weight tensor
            exploration_strength: Strength of exploration noise

        Returns:
            Noise tensor with the same shape as ``current_weights``.
        """
        # Adaptive scale: noisier when the weights themselves are spread out.
        weight_std = torch.std(current_weights)
        adaptive_scale = self.noise_scale * exploration_strength * weight_std

        base_noise = torch.randn_like(current_weights)
        correlated_noise = base_noise

        # Spatially correlate noise over the last two dims of conv-style
        # (out, in, h, w) weights. The original unsqueezed the 4-D tensor
        # to 6-D before conv2d (a RuntimeError, since conv2d takes 3-D/4-D
        # input), and even kernel sizes with padding=k//2 would change the
        # output shape; folding leading dims into the batch and requiring
        # a 3x3 kernel on >=3x3 maps fixes both.
        if current_weights.dim() == 4:
            h, w = current_weights.shape[-2:]
            if h >= 3 and w >= 3:
                correlation_kernel = torch.ones(1, 1, 3, 3) / 9.0
                flat = base_noise.reshape(-1, 1, h, w)
                correlated_noise = torch.nn.functional.conv2d(
                    flat, correlation_kernel, padding=1
                ).reshape(current_weights.shape)

        return correlated_noise * adaptive_scale

    def generate_entanglement_noise(self,
                                   ensemble_weights: list[torch.Tensor],
                                   entanglement_strength: float = 0.1) -> list[torch.Tensor]:
        """
        Generate entangled noise for ensemble members.

        Each member's noise mixes a private Gaussian state with one shared
        state, coupled by a member-dependent cosine factor.
        NOTE(review): assumes every member's weight tensor has the same
        shape as the first member's — confirm at the call sites.

        Args:
            ensemble_weights: List of weight tensors from ensemble members
            entanglement_strength: Strength of entanglement correlations

        Returns:
            List of entangled noise tensors
        """
        num_ensemble = len(ensemble_weights)
        # Entanglement needs at least two members; fall back to
        # independent exploration noise otherwise.
        if num_ensemble < 2:
            return [self.generate_exploration_noise(w) for w in ensemble_weights]

        # Shared quantum state common to all members.
        shared_quantum_state = torch.randn(ensemble_weights[0].shape)

        entangled_noises = []
        for i, weights in enumerate(ensemble_weights):
            individual_state = torch.randn_like(weights)

            # Member-dependent coupling phase around the unit circle.
            entanglement_phase = 2 * math.pi * i / num_ensemble
            coupling_factor = math.cos(entanglement_phase) * entanglement_strength

            # Mix private and shared components.
            entangled_noise = (
                (1 - entanglement_strength) * individual_state +
                coupling_factor * shared_quantum_state
            ) * self.noise_scale

            entangled_noises.append(entangled_noise)

        return entangled_noises

    def generate_quantum_regularization_noise(self,
                                             weights: torch.Tensor,
                                             regularization_strength: float = 0.01) -> torch.Tensor:
        """
        Generate quantum-inspired regularization noise.

        Superimposes a few "energy levels": each level contributes fresh
        Gaussian noise scaled by a sinusoid of the current time step with
        exponentially decaying amplitude.

        Args:
            weights: Weight tensor to regularize
            regularization_strength: Strength of regularization

        Returns:
            Quantum regularization noise
        """
        harmonic_noise = torch.randn_like(weights)

        # First few harmonic-oscillator energy levels. The original called
        # torch.sin on a plain float, which raises TypeError (torch.sin
        # requires a Tensor); math.sin is the correct scalar function.
        for n in range(1, 5):
            frequency = math.sqrt(n) * 2 * math.pi
            energy_component = math.sin(frequency * self.time_step) * math.exp(-n * 0.1)
            harmonic_noise += energy_component * torch.randn_like(weights)

        return harmonic_noise * regularization_strength * self.noise_scale

    def generate_adaptive_quantum_noise(self,
                                       weights: torch.Tensor,
                                       gradient: Optional[torch.Tensor] = None,
                                       loss_value: Optional[float] = None,
                                       **kwargs) -> Tuple[torch.Tensor, Dict[str, Any]]:
        """
        Generate adaptive quantum noise based on training dynamics.

        Advances the internal time step by one on every call.

        Args:
            weights: Current weight tensor
            gradient: Current gradient tensor (optional)
            loss_value: Current loss value (optional)
            **kwargs: Additional parameters (currently unused)

        Returns:
            Tuple of (quantum_noise, noise_metadata)
        """
        # Base quantum noise.
        quantum_noise = torch.randn_like(weights) * self.noise_scale

        # Increase noise when gradients are small (exploration),
        # decrease when gradients are large (exploitation).
        gradient_factor = 1.0
        if gradient is not None:
            gradient_magnitude = torch.norm(gradient).item()
            gradient_factor = 1.0 / (1.0 + gradient_magnitude)
            quantum_noise *= gradient_factor

        # Increase noise for high loss (need more exploration). The
        # original used 1 + exp(-loss), which *decreases* as loss grows,
        # contradicting its own comment; 2 - exp(-loss) is monotonically
        # increasing in loss over the same (1, 2) range.
        loss_factor = 1.0
        if loss_value is not None:
            loss_factor = 2.0 - math.exp(-loss_value)
            quantum_noise *= loss_factor

        # Quantum decoherence: noise fades as simulated time passes.
        decoherence_factor = math.exp(-self.decoherence_rate * self.time_step)
        quantum_noise *= decoherence_factor

        # Advance simulated time for the next call.
        self.time_step += 1

        metadata = {
            'noise_scale_used': self.noise_scale,
            'decoherence_factor': decoherence_factor,
            'time_step': self.time_step,
            'adaptive_factors': {
                'gradient_factor': gradient_factor,
                'loss_factor': loss_factor
            }
        }

        return quantum_noise, metadata

    def apply_quantum_noise_schedule(self,
                                    weights: torch.Tensor,
                                    schedule_type: str = "exponential",
                                    schedule_params: Optional[Dict[str, float]] = None) -> torch.Tensor:
        """
        Apply quantum noise with a specific schedule.

        Args:
            weights: Weight tensor to add noise to
            schedule_type: Type of noise schedule ("exponential", "cosine", "linear");
                any other value yields a constant factor of 1.0
            schedule_params: Parameters for the schedule

        Returns:
            Noise tensor according to schedule
        """
        if schedule_params is None:
            schedule_params = {}

        # Schedule factor as a function of the internal time step.
        if schedule_type == "exponential":
            decay_rate = schedule_params.get("decay_rate", 0.1)
            schedule_factor = math.exp(-decay_rate * self.time_step)
        elif schedule_type == "cosine":
            period = schedule_params.get("period", 100)
            schedule_factor = 0.5 * (1 + math.cos(2 * math.pi * self.time_step / period))
        elif schedule_type == "linear":
            max_steps = schedule_params.get("max_steps", 1000)
            schedule_factor = max(0, 1 - self.time_step / max_steps)
        else:
            schedule_factor = 1.0

        base_noise = torch.randn_like(weights)
        return base_noise * self.noise_scale * schedule_factor

    def reset_time_step(self):
        """Reset the internal time step counter."""
        self.time_step = 0

    def get_noise_statistics(self) -> Dict[str, float]:
        """Get current noise generator statistics."""
        return {
            'current_time_step': self.time_step,
            'noise_scale': self.noise_scale,
            'coherence_time': self.coherence_time,
            'decoherence_rate': self.decoherence_rate,
            'current_decoherence_factor': math.exp(-self.decoherence_rate * self.time_step)
        }
|