hqde-0.1.0-py3-none-any.whl

@@ -0,0 +1,336 @@
+ """
+ Quantum-inspired optimization module for the HQDE framework.
+
+ This module implements quantum annealing and quantum-inspired optimization
+ algorithms for ensemble selection and hyperparameter optimization.
+ """
+
+ import math
+ import random
+ from typing import Any, Callable, Dict, List, Tuple
+
+ import numpy as np
+ import torch
+
+
+ class QuantumEnsembleOptimizer:
+     """Quantum-inspired optimizer for ensemble composition and hyperparameters."""
+
+     def __init__(self,
+                  temperature_schedule: str = "exponential",
+                  initial_temperature: float = 10.0,
+                  final_temperature: float = 0.01,
+                  annealing_steps: int = 1000):
+         """
+         Initialize the quantum ensemble optimizer.
+
+         Args:
+             temperature_schedule: Annealing schedule ("exponential", "linear",
+                 or "cosine"; any other value falls back to exponential decay)
+             initial_temperature: Initial temperature for quantum annealing
+             final_temperature: Final temperature for quantum annealing
+             annealing_steps: Number of annealing steps
+         """
+         self.temperature_schedule = temperature_schedule
+         self.initial_temperature = initial_temperature
+         self.final_temperature = final_temperature
+         self.annealing_steps = annealing_steps
+
+     def get_temperature(self, step: int) -> float:
+         """Get the temperature at a given annealing step."""
+         if step >= self.annealing_steps:
+             return self.final_temperature
+
+         progress = step / self.annealing_steps
+
+         if self.temperature_schedule == "exponential":
+             return self.initial_temperature * (self.final_temperature / self.initial_temperature) ** progress
+         elif self.temperature_schedule == "linear":
+             return self.initial_temperature * (1 - progress) + self.final_temperature * progress
+         elif self.temperature_schedule == "cosine":
+             return self.final_temperature + 0.5 * (self.initial_temperature - self.final_temperature) * \
+                 (1 + math.cos(math.pi * progress))
+         else:
+             # Unrecognized schedule: fall back to a simple exponential decay.
+             return self.initial_temperature * math.exp(-progress * 3)
+
+     def formulate_qubo(self,
+                        candidate_models: List[Dict[str, Any]],
+                        constraints: Dict[str, Any]) -> torch.Tensor:
+         """
+         Formulate a Quadratic Unconstrained Binary Optimization (QUBO) matrix.
+
+         Args:
+             candidate_models: List of candidate model configurations
+             constraints: Optimization constraints (memory, compute, ensemble size)
+
+         Returns:
+             QUBO matrix for quantum annealing (lower energy is better)
+         """
+         num_models = len(candidate_models)
+         qubo_matrix = torch.zeros(num_models, num_models)
+
+         # Extract model properties
+         accuracies = [model.get('accuracy', 0.5) for model in candidate_models]
+         memory_costs = [model.get('memory_cost', 1.0) for model in candidate_models]
+         compute_costs = [model.get('compute_cost', 1.0) for model in candidate_models]
+
+         # Objective: maximize accuracy and diversity, minimize costs. The
+         # annealer minimizes energy, so rewards enter with a negative sign
+         # and penalties with a positive sign.
+         max_memory = constraints.get('max_memory', float('inf'))
+         max_compute = constraints.get('max_compute', float('inf'))
+         ensemble_size_target = max(1, constraints.get('ensemble_size', num_models // 2))
+
+         # Diagonal terms (individual model contributions)
+         for i in range(num_models):
+             # Reward high accuracy
+             accuracy_reward = accuracies[i] * 10.0
+
+             # Penalize high resource costs
+             memory_penalty = memory_costs[i] / max_memory * 5.0 if max_memory != float('inf') else 0.0
+             compute_penalty = compute_costs[i] / max_compute * 5.0 if max_compute != float('inf') else 0.0
+
+             qubo_matrix[i, i] = -(accuracy_reward - memory_penalty - compute_penalty)
+
+         # Off-diagonal terms (model interactions)
+         for i in range(num_models):
+             for j in range(i + 1, num_models):
+                 # Encourage diversity in the ensemble
+                 accuracy_diff = abs(accuracies[i] - accuracies[j])
+                 diversity_bonus = accuracy_diff * 2.0
+
+                 # Penalize resource conflicts (division by inf yields 0.0,
+                 # i.e. no penalty when a constraint is absent)
+                 resource_conflict = (memory_costs[i] + memory_costs[j]) / max_memory * 2.0
+                 resource_conflict += (compute_costs[i] + compute_costs[j]) / max_compute * 2.0
+
+                 interaction_term = -(diversity_bonus - resource_conflict)
+                 qubo_matrix[i, j] = qubo_matrix[j, i] = interaction_term
+
+         # Soft ensemble-size constraint: a uniform pairwise penalty, scaled by
+         # the target size, that makes large ensembles increasingly expensive.
+         ensemble_size_penalty = 2.0
+         for i in range(num_models):
+             for j in range(num_models):
+                 if i != j:
+                     qubo_matrix[i, j] += ensemble_size_penalty / ensemble_size_target
+
+         return qubo_matrix
+
+     def quantum_annealing_solve(self,
+                                 qubo_matrix: torch.Tensor,
+                                 num_runs: int = 10) -> torch.Tensor:
+         """
+         Solve a QUBO using simulated quantum annealing.
+
+         Args:
+             qubo_matrix: QUBO matrix to optimize
+             num_runs: Number of independent annealing runs
+
+         Returns:
+             Best solution found (binary vector)
+         """
+         num_variables = qubo_matrix.shape[0]
+         best_solution = None
+         best_energy = float('inf')
+
+         for run in range(num_runs):
+             # Initialize a random binary solution
+             solution = torch.randint(0, 2, (num_variables,), dtype=torch.float32)
+
+             # Annealing process
+             for step in range(self.annealing_steps):
+                 temperature = self.get_temperature(step)
+
+                 # Select a random variable to flip
+                 var_idx = random.randint(0, num_variables - 1)
+
+                 # Calculate the energy change for the flip
+                 old_energy = self._calculate_qubo_energy(solution, qubo_matrix)
+                 solution[var_idx] = 1 - solution[var_idx]
+                 new_energy = self._calculate_qubo_energy(solution, qubo_matrix)
+                 energy_diff = new_energy - old_energy
+
+                 # Metropolis criterion: always accept improvements; accept
+                 # worse moves with probability exp(-dE / T)
+                 if not (energy_diff < 0 or random.random() < math.exp(-energy_diff / temperature)):
+                     # Reject the move (flip the bit back)
+                     solution[var_idx] = 1 - solution[var_idx]
+
+             # Keep the best solution across runs
+             final_energy = self._calculate_qubo_energy(solution, qubo_matrix)
+             if final_energy < best_energy:
+                 best_energy = final_energy
+                 best_solution = solution.clone()
+
+         return best_solution
+
+     def _calculate_qubo_energy(self, solution: torch.Tensor, qubo_matrix: torch.Tensor) -> float:
+         """Calculate the energy x^T Q x of a QUBO solution."""
+         return (solution @ qubo_matrix @ solution).item()
+
+     def optimize_ensemble_composition(self,
+                                       candidate_models: List[Dict[str, Any]],
+                                       constraints: Dict[str, Any],
+                                       use_quantum_annealing: bool = True) -> Tuple[List[int], Dict[str, Any]]:
+         """
+         Optimize ensemble composition using quantum-inspired methods.
+
+         Args:
+             candidate_models: List of candidate model configurations
+             constraints: Optimization constraints
+             use_quantum_annealing: Whether to use quantum annealing
+
+         Returns:
+             Tuple of (selected_model_indices, optimization_metrics)
+         """
+         # Formulate as a QUBO problem
+         qubo_matrix = self.formulate_qubo(candidate_models, constraints)
+
+         if use_quantum_annealing:
+             # Use quantum annealing
+             solution = self.quantum_annealing_solve(qubo_matrix)
+         else:
+             # Use classical optimization (greedy)
+             solution = self._greedy_solve(qubo_matrix)
+
+         # Extract the selected models
+         selected_indices = [i for i, selected in enumerate(solution) if selected > 0.5]
+
+         # Calculate optimization metrics
+         metrics = self._calculate_optimization_metrics(
+             selected_indices, candidate_models, constraints
+         )
+
+         return selected_indices, metrics
+
+     def _greedy_solve(self, qubo_matrix: torch.Tensor) -> torch.Tensor:
+         """Greedy QUBO solver (classical fallback); minimizes energy."""
+         num_variables = qubo_matrix.shape[0]
+         solution = torch.zeros(num_variables)
+
+         # Greedily select variables that lower the QUBO energy
+         for _ in range(num_variables):
+             best_var = -1
+             best_improvement = 0.0
+             # The current energy is the same for every candidate flip, so
+             # compute it once per round rather than inside the inner loop.
+             current_energy = self._calculate_qubo_energy(solution, qubo_matrix)
+
+             for var in range(num_variables):
+                 if solution[var] == 0:  # Variable not yet selected
+                     # Energy improvement if we select this variable
+                     test_solution = solution.clone()
+                     test_solution[var] = 1
+                     new_energy = self._calculate_qubo_energy(test_solution, qubo_matrix)
+                     improvement = current_energy - new_energy
+
+                     if improvement > best_improvement:
+                         best_improvement = improvement
+                         best_var = var
+
+             if best_var >= 0:
+                 solution[best_var] = 1
+             else:
+                 # No remaining selection improves the energy; stop
+                 break
+
+         return solution
+
+     def _calculate_optimization_metrics(self,
+                                         selected_indices: List[int],
+                                         candidate_models: List[Dict[str, Any]],
+                                         constraints: Dict[str, Any]) -> Dict[str, Any]:
+         """Calculate summary metrics for the optimization result."""
+         if not selected_indices:
+             return {
+                 'ensemble_size': 0,
+                 'total_accuracy': 0.0,
+                 'total_memory_cost': 0.0,
+                 'total_compute_cost': 0.0,
+                 'diversity_score': 0.0,
+                 'constraint_satisfaction': 0.0,
+                 'average_accuracy': 0.0
+             }
+
+         selected_models = [candidate_models[i] for i in selected_indices]
+
+         # Aggregate metrics
+         ensemble_size = len(selected_indices)
+         total_accuracy = sum(model.get('accuracy', 0) for model in selected_models)
+         total_memory_cost = sum(model.get('memory_cost', 0) for model in selected_models)
+         total_compute_cost = sum(model.get('compute_cost', 0) for model in selected_models)
+
+         # Diversity score (variance of the selected models' accuracies)
+         accuracies = [model.get('accuracy', 0) for model in selected_models]
+         diversity_score = float(np.var(accuracies)) if len(accuracies) > 1 else 0.0
+
+         # Constraint satisfaction: 1.0 when within budget, otherwise the
+         # budget-to-usage ratio
+         max_memory = constraints.get('max_memory', float('inf'))
+         max_compute = constraints.get('max_compute', float('inf'))
+
+         memory_satisfaction = 1.0 if total_memory_cost <= max_memory else max_memory / total_memory_cost
+         compute_satisfaction = 1.0 if total_compute_cost <= max_compute else max_compute / total_compute_cost
+         constraint_satisfaction = min(memory_satisfaction, compute_satisfaction)
+
+         return {
+             'ensemble_size': ensemble_size,
+             'total_accuracy': total_accuracy,
+             'total_memory_cost': total_memory_cost,
+             'total_compute_cost': total_compute_cost,
+             'diversity_score': diversity_score,
+             'constraint_satisfaction': constraint_satisfaction,
+             'average_accuracy': total_accuracy / ensemble_size
+         }
+
+     def optimize_hyperparameters(self,
+                                  objective_function: Callable,
+                                  parameter_space: Dict[str, Tuple[float, float]],
+                                  num_iterations: int = 100) -> Tuple[Dict[str, float], float]:
+         """
+         Optimize hyperparameters using quantum-inspired search.
+
+         Args:
+             objective_function: Function to optimize (should return higher
+                 values for better solutions)
+             parameter_space: Dictionary of parameter ranges {name: (min, max)}
+             num_iterations: Number of optimization iterations
+
+         Returns:
+             Tuple of (best_parameters, best_score)
+         """
+         # Initialize with random parameters
+         current_params = {}
+         for param_name, (min_val, max_val) in parameter_space.items():
+             current_params[param_name] = random.uniform(min_val, max_val)
+
+         current_score = objective_function(current_params)
+
+         # Track the best solution starting from the initial parameters, so
+         # they are never lost to an accepted downhill move
+         best_params = current_params.copy()
+         best_score = current_score
+
+         # Quantum-inspired optimization loop
+         for iteration in range(num_iterations):
+             # Map the iteration onto the annealing schedule
+             temperature = self.get_temperature(iteration * self.annealing_steps // num_iterations)
+
+             # Generate a quantum fluctuation in the parameters
+             new_params = {}
+             for param_name, (min_val, max_val) in parameter_space.items():
+                 # Quantum-tunneling-style noise: allows escaping local optima
+                 quantum_noise = np.random.normal(0, temperature * (max_val - min_val) * 0.1)
+                 new_value = current_params[param_name] + quantum_noise
+
+                 # Clamp to the parameter bounds
+                 new_params[param_name] = max(min_val, min(max_val, new_value))
+
+             # Evaluate the new parameters
+             new_score = objective_function(new_params)
+
+             # Quantum acceptance criterion (maximizing, so the score
+             # difference enters the exponential with a positive sign)
+             score_diff = new_score - current_score
+             if score_diff > 0 or random.random() < math.exp(score_diff / temperature):
+                 current_params = new_params
+                 current_score = new_score
+
+             # Update the best solution seen so far
+             if current_score > best_score:
+                 best_score = current_score
+                 best_params = current_params.copy()
+
+         return best_params, best_score
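
For orientation, the snippet below is a minimal usage sketch of the module above; it is not part of the package. The import path and the toy candidate dictionaries are assumptions for illustration; the dictionary keys ('accuracy', 'memory_cost', 'compute_cost') and the constraint names are the ones the code actually reads.

# Usage sketch -- the import path is an assumption; adjust it to wherever
# the package installs this module.
from hqde.quantum_optimization import QuantumEnsembleOptimizer

optimizer = QuantumEnsembleOptimizer(annealing_steps=500)

# Toy candidates: formulate_qubo reads only these three keys.
candidates = [
    {'accuracy': 0.91, 'memory_cost': 2.0, 'compute_cost': 3.0},
    {'accuracy': 0.88, 'memory_cost': 1.0, 'compute_cost': 1.5},
    {'accuracy': 0.84, 'memory_cost': 0.5, 'compute_cost': 1.0},
]
constraints = {'max_memory': 3.0, 'max_compute': 4.0, 'ensemble_size': 2}

indices, metrics = optimizer.optimize_ensemble_composition(candidates, constraints)
print(indices, metrics['average_accuracy'], metrics['constraint_satisfaction'])

# Hyperparameter search over a one-dimensional toy objective (higher is better).
best_params, best_score = optimizer.optimize_hyperparameters(
    objective_function=lambda p: -(p['lr'] - 0.01) ** 2,
    parameter_space={'lr': (0.0001, 0.1)},
    num_iterations=200,
)
print(best_params, best_score)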
hqde/utils/__init__.py ADDED
@@ -0,0 +1,20 @@
+ """
+ Utility modules for the HQDE framework.
+
+ This module provides utility functions for performance monitoring,
+ data preprocessing, visualization, and system configuration.
+ """
+
+ from .performance_monitor import PerformanceMonitor, SystemMetrics
+ from .data_utils import DataLoader, DataPreprocessor
+ from .visualization import HQDEVisualizer
+ from .config_manager import ConfigManager
+
+ __all__ = [
+     'PerformanceMonitor',
+     'SystemMetrics',
+     'DataLoader',
+     'DataPreprocessor',
+     'HQDEVisualizer',
+     'ConfigManager'
+ ]
hqde/utils/config_manager.py ADDED
@@ -0,0 +1,9 @@
+ """
+ Configuration management utilities for the HQDE framework.
+
+ Placeholder implementation for configuration management.
+ """
+
+ class ConfigManager:
+     """Placeholder ConfigManager class."""
+     pass
hqde/utils/data_utils.py ADDED
@@ -0,0 +1,13 @@
+ """
+ Data utilities for the HQDE framework.
+
+ Placeholder implementation for data loading and preprocessing utilities.
+ """
+
+ class DataLoader:
+     """Placeholder DataLoader class."""
+     pass
+
+ class DataPreprocessor:
+     """Placeholder DataPreprocessor class."""
+     pass