morphml 1.0.0__py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of morphml might be problematic.
- morphml/__init__.py +14 -0
- morphml/api/__init__.py +26 -0
- morphml/api/app.py +326 -0
- morphml/api/auth.py +193 -0
- morphml/api/client.py +338 -0
- morphml/api/models.py +132 -0
- morphml/api/rate_limit.py +192 -0
- morphml/benchmarking/__init__.py +36 -0
- morphml/benchmarking/comparison.py +430 -0
- morphml/benchmarks/__init__.py +56 -0
- morphml/benchmarks/comparator.py +409 -0
- morphml/benchmarks/datasets.py +280 -0
- morphml/benchmarks/metrics.py +199 -0
- morphml/benchmarks/openml_suite.py +201 -0
- morphml/benchmarks/problems.py +289 -0
- morphml/benchmarks/suite.py +318 -0
- morphml/cli/__init__.py +5 -0
- morphml/cli/commands/experiment.py +329 -0
- morphml/cli/main.py +457 -0
- morphml/cli/quickstart.py +312 -0
- morphml/config.py +278 -0
- morphml/constraints/__init__.py +19 -0
- morphml/constraints/handler.py +205 -0
- morphml/constraints/predicates.py +285 -0
- morphml/core/__init__.py +3 -0
- morphml/core/crossover.py +449 -0
- morphml/core/dsl/README.md +359 -0
- morphml/core/dsl/__init__.py +72 -0
- morphml/core/dsl/ast_nodes.py +364 -0
- morphml/core/dsl/compiler.py +318 -0
- morphml/core/dsl/layers.py +368 -0
- morphml/core/dsl/lexer.py +336 -0
- morphml/core/dsl/parser.py +455 -0
- morphml/core/dsl/search_space.py +386 -0
- morphml/core/dsl/syntax.py +199 -0
- morphml/core/dsl/type_system.py +361 -0
- morphml/core/dsl/validator.py +386 -0
- morphml/core/graph/__init__.py +40 -0
- morphml/core/graph/edge.py +124 -0
- morphml/core/graph/graph.py +507 -0
- morphml/core/graph/mutations.py +409 -0
- morphml/core/graph/node.py +196 -0
- morphml/core/graph/serialization.py +361 -0
- morphml/core/graph/visualization.py +431 -0
- morphml/core/objectives/__init__.py +20 -0
- morphml/core/search/__init__.py +33 -0
- morphml/core/search/individual.py +252 -0
- morphml/core/search/parameters.py +453 -0
- morphml/core/search/population.py +375 -0
- morphml/core/search/search_engine.py +340 -0
- morphml/distributed/__init__.py +76 -0
- morphml/distributed/fault_tolerance.py +497 -0
- morphml/distributed/health_monitor.py +348 -0
- morphml/distributed/master.py +709 -0
- morphml/distributed/proto/README.md +224 -0
- morphml/distributed/proto/__init__.py +74 -0
- morphml/distributed/proto/worker.proto +170 -0
- morphml/distributed/proto/worker_pb2.py +79 -0
- morphml/distributed/proto/worker_pb2_grpc.py +423 -0
- morphml/distributed/resource_manager.py +416 -0
- morphml/distributed/scheduler.py +567 -0
- morphml/distributed/storage/__init__.py +33 -0
- morphml/distributed/storage/artifacts.py +381 -0
- morphml/distributed/storage/cache.py +366 -0
- morphml/distributed/storage/checkpointing.py +329 -0
- morphml/distributed/storage/database.py +459 -0
- morphml/distributed/worker.py +549 -0
- morphml/evaluation/__init__.py +5 -0
- morphml/evaluation/heuristic.py +237 -0
- morphml/exceptions.py +55 -0
- morphml/execution/__init__.py +5 -0
- morphml/execution/local_executor.py +350 -0
- morphml/integrations/__init__.py +28 -0
- morphml/integrations/jax_adapter.py +206 -0
- morphml/integrations/pytorch_adapter.py +530 -0
- morphml/integrations/sklearn_adapter.py +206 -0
- morphml/integrations/tensorflow_adapter.py +230 -0
- morphml/logging_config.py +93 -0
- morphml/meta_learning/__init__.py +66 -0
- morphml/meta_learning/architecture_similarity.py +277 -0
- morphml/meta_learning/experiment_database.py +240 -0
- morphml/meta_learning/knowledge_base/__init__.py +19 -0
- morphml/meta_learning/knowledge_base/embedder.py +179 -0
- morphml/meta_learning/knowledge_base/knowledge_base.py +313 -0
- morphml/meta_learning/knowledge_base/meta_features.py +265 -0
- morphml/meta_learning/knowledge_base/vector_store.py +271 -0
- morphml/meta_learning/predictors/__init__.py +27 -0
- morphml/meta_learning/predictors/ensemble.py +221 -0
- morphml/meta_learning/predictors/gnn_predictor.py +552 -0
- morphml/meta_learning/predictors/learning_curve.py +231 -0
- morphml/meta_learning/predictors/proxy_metrics.py +261 -0
- morphml/meta_learning/strategy_evolution/__init__.py +27 -0
- morphml/meta_learning/strategy_evolution/adaptive_optimizer.py +226 -0
- morphml/meta_learning/strategy_evolution/bandit.py +276 -0
- morphml/meta_learning/strategy_evolution/portfolio.py +230 -0
- morphml/meta_learning/transfer.py +581 -0
- morphml/meta_learning/warm_start.py +286 -0
- morphml/optimizers/__init__.py +74 -0
- morphml/optimizers/adaptive_operators.py +399 -0
- morphml/optimizers/bayesian/__init__.py +52 -0
- morphml/optimizers/bayesian/acquisition.py +387 -0
- morphml/optimizers/bayesian/base.py +319 -0
- morphml/optimizers/bayesian/gaussian_process.py +635 -0
- morphml/optimizers/bayesian/smac.py +534 -0
- morphml/optimizers/bayesian/tpe.py +411 -0
- morphml/optimizers/differential_evolution.py +220 -0
- morphml/optimizers/evolutionary/__init__.py +61 -0
- morphml/optimizers/evolutionary/cma_es.py +416 -0
- morphml/optimizers/evolutionary/differential_evolution.py +556 -0
- morphml/optimizers/evolutionary/encoding.py +426 -0
- morphml/optimizers/evolutionary/particle_swarm.py +449 -0
- morphml/optimizers/genetic_algorithm.py +486 -0
- morphml/optimizers/gradient_based/__init__.py +22 -0
- morphml/optimizers/gradient_based/darts.py +550 -0
- morphml/optimizers/gradient_based/enas.py +585 -0
- morphml/optimizers/gradient_based/operations.py +474 -0
- morphml/optimizers/gradient_based/utils.py +601 -0
- morphml/optimizers/hill_climbing.py +169 -0
- morphml/optimizers/multi_objective/__init__.py +56 -0
- morphml/optimizers/multi_objective/indicators.py +504 -0
- morphml/optimizers/multi_objective/nsga2.py +647 -0
- morphml/optimizers/multi_objective/visualization.py +427 -0
- morphml/optimizers/nsga2.py +308 -0
- morphml/optimizers/random_search.py +172 -0
- morphml/optimizers/simulated_annealing.py +181 -0
- morphml/plugins/__init__.py +35 -0
- morphml/plugins/custom_evaluator_example.py +81 -0
- morphml/plugins/custom_optimizer_example.py +63 -0
- morphml/plugins/plugin_system.py +454 -0
- morphml/reports/__init__.py +30 -0
- morphml/reports/generator.py +362 -0
- morphml/tracking/__init__.py +7 -0
- morphml/tracking/experiment.py +309 -0
- morphml/tracking/logger.py +301 -0
- morphml/tracking/reporter.py +357 -0
- morphml/utils/__init__.py +6 -0
- morphml/utils/checkpoint.py +189 -0
- morphml/utils/comparison.py +390 -0
- morphml/utils/export.py +407 -0
- morphml/utils/progress.py +392 -0
- morphml/utils/validation.py +392 -0
- morphml/version.py +7 -0
- morphml/visualization/__init__.py +50 -0
- morphml/visualization/analytics.py +423 -0
- morphml/visualization/architecture_diagrams.py +353 -0
- morphml/visualization/architecture_plot.py +223 -0
- morphml/visualization/convergence_plot.py +174 -0
- morphml/visualization/crossover_viz.py +386 -0
- morphml/visualization/graph_viz.py +338 -0
- morphml/visualization/pareto_plot.py +149 -0
- morphml/visualization/plotly_dashboards.py +422 -0
- morphml/visualization/population.py +309 -0
- morphml/visualization/progress.py +260 -0
- morphml-1.0.0.dist-info/METADATA +434 -0
- morphml-1.0.0.dist-info/RECORD +158 -0
- morphml-1.0.0.dist-info/WHEEL +4 -0
- morphml-1.0.0.dist-info/entry_points.txt +3 -0
- morphml-1.0.0.dist-info/licenses/LICENSE +21 -0
morphml/core/search/individual.py
@@ -0,0 +1,252 @@
"""Individual representation for evolutionary algorithms.

An Individual wraps a neural architecture (ModelGraph) with fitness tracking
and metadata for use in population-based optimization.

Example:
    >>> from morphml.core.graph import ModelGraph
    >>> from morphml.core.search import Individual
    >>>
    >>> graph = ModelGraph()
    >>> # ... build graph ...
    >>>
    >>> individual = Individual(graph)
    >>> individual.fitness = 0.95
    >>> individual.metadata['accuracy'] = 0.95
    >>> individual.metadata['latency'] = 12.3
"""

import time
from typing import Any, Dict, Optional

from morphml.core.graph import ModelGraph
from morphml.logging_config import get_logger

logger = get_logger(__name__)


class Individual:
    """
    Represents a single architecture in the population.

    An Individual consists of:
    - A neural architecture (ModelGraph)
    - Fitness score (objective value)
    - Metadata (metrics, evaluation info, etc.)
    - Age (number of generations alive)

    Attributes:
        graph: The neural architecture
        fitness: Fitness score (higher is better)
        metadata: Additional information
        age: Number of generations this individual has survived
        parent_ids: IDs of parent individuals (for genealogy)
        birth_generation: Generation when created

    Example:
        >>> individual = Individual(graph, fitness=0.92)
        >>> individual.metadata['accuracy'] = 0.92
        >>> individual.metadata['params'] = 1000000
    """

    def __init__(
        self,
        graph: ModelGraph,
        fitness: Optional[float] = None,
        metadata: Optional[Dict[str, Any]] = None,
        parent_ids: Optional[list] = None,
    ):
        """
        Initialize individual.

        Args:
            graph: Neural architecture
            fitness: Initial fitness score
            metadata: Additional metadata
            parent_ids: List of parent individual IDs
        """
        self.graph = graph
        self.fitness = fitness
        self.metadata = metadata or {}
        self.age = 0
        self.parent_ids = parent_ids or []
        self.birth_generation = 0

        # Generate unique ID (with microseconds for better uniqueness)
        import random

        self.id = f"{graph.hash()[:16]}_{int(time.time() * 1000000)}_{random.randint(0, 9999)}"

        # Track evaluation status
        self._evaluated = fitness is not None

        logger.debug(f"Created Individual: {self.id[:12]}")

    def is_evaluated(self) -> bool:
        """
        Check if individual has been evaluated.

        Returns:
            True if fitness has been set
        """
        return self._evaluated

    def set_fitness(self, fitness: float, **metrics: Any) -> None:
        """
        Set fitness and optional metrics.

        Args:
            fitness: Fitness score
            **metrics: Additional metrics to store in metadata

        Example:
            >>> individual.set_fitness(0.95, accuracy=0.95, loss=0.05)
        """
        self.fitness = fitness
        self._evaluated = True

        # Store metrics
        for key, value in metrics.items():
            self.metadata[key] = value

        logger.debug(f"Individual {self.id[:12]} fitness set to {fitness:.4f}")

    def increment_age(self) -> None:
        """Increment age by 1 generation."""
        self.age += 1

    def clone(self, keep_fitness: bool = False) -> "Individual":
        """
        Create a copy of this individual.

        Args:
            keep_fitness: Whether to copy fitness score

        Returns:
            New Individual instance
        """
        new_graph = self.graph.clone()
        new_fitness = self.fitness if keep_fitness else None
        new_metadata = self.metadata.copy()

        new_individual = Individual(
            graph=new_graph,
            fitness=new_fitness,
            metadata=new_metadata,
            parent_ids=[self.id],
        )

        new_individual.age = 0  # Reset age

        return new_individual

    def get_metric(self, key: str, default: Any = None) -> Any:
        """
        Get a metric from metadata.

        Args:
            key: Metric key
            default: Default value if not found

        Returns:
            Metric value
        """
        return self.metadata.get(key, default)

    def dominates(self, other: "Individual", objectives: list) -> bool:
        """
        Check if this individual dominates another (for multi-objective).

        Args:
            other: Other individual
            objectives: List of objective names to compare

        Returns:
            True if this individual dominates the other
        """
        if not self.is_evaluated() or not other.is_evaluated():
            return False

        better_in_any = False

        for obj in objectives:
            self_val = self.get_metric(obj, 0.0)
            other_val = other.get_metric(obj, 0.0)

            if self_val < other_val:
                return False  # Worse in this objective
            if self_val > other_val:
                better_in_any = True

        return better_in_any

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize to dictionary.

        Returns:
            Dictionary representation
        """
        return {
            "id": self.id,
            "graph": self.graph.to_dict(),
            "fitness": self.fitness,
            "metadata": self.metadata,
            "age": self.age,
            "parent_ids": self.parent_ids,
            "birth_generation": self.birth_generation,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Individual":
        """
        Deserialize from dictionary.

        Args:
            data: Dictionary representation

        Returns:
            Individual instance
        """
        graph = ModelGraph.from_dict(data["graph"])

        individual = cls(
            graph=graph,
            fitness=data.get("fitness"),
            metadata=data.get("metadata", {}),
            parent_ids=data.get("parent_ids", []),
        )

        individual.id = data["id"]
        individual.age = data.get("age", 0)
        individual.birth_generation = data.get("birth_generation", 0)

        return individual

    def __repr__(self) -> str:
        """String representation."""
        fitness_str = f"{self.fitness:.4f}" if self.fitness is not None else "N/A"
        return (
            f"Individual(id={self.id[:12]}, "
            f"fitness={fitness_str}, "
            f"age={self.age}, "
            f"nodes={len(self.graph.nodes)})"
        )

    def __lt__(self, other: "Individual") -> bool:
        """Less than comparison based on fitness (for sorting)."""
        if self.fitness is None:
            return True
        if other.fitness is None:
            return False
        return self.fitness < other.fitness

    def __eq__(self, other: object) -> bool:
        """Equality based on ID."""
        if not isinstance(other, Individual):
            return False
        return self.id == other.id

    def __hash__(self) -> int:
        """Hash based on ID."""
        return hash(self.id)
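Taken together, these methods support a typical generational loop: evaluate, compare, and carry survivors forward. The sketch below is illustrative only; `_StubGraph` is a hypothetical stand-in for ModelGraph (providing just the methods Individual touches) so the snippet runs on its own, and the import path is the one given in the module docstring above.

# Illustrative sketch only: _StubGraph is a hypothetical stand-in for ModelGraph.
from morphml.core.search import Individual

class _StubGraph:
    nodes = {}  # Individual.__repr__ reads len(graph.nodes)

    def hash(self) -> str:
        return "deadbeefdeadbeef"

    def clone(self) -> "_StubGraph":
        return _StubGraph()

parent = Individual(_StubGraph())
parent.set_fitness(0.91, accuracy=0.91, latency=15.2)

child = parent.clone()  # fresh copy: fitness cleared, parent_ids=[parent.id]
child.set_fitness(0.93, accuracy=0.93, latency=14.0)

# Sort by fitness via __lt__ (unevaluated individuals sort first).
population = sorted([parent, child])
best = population[-1]

# Pareto comparison over named metrics stored in metadata (higher is better).
print(child.dominates(parent, objectives=["accuracy"]))  # True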
morphml/core/search/parameters.py
@@ -0,0 +1,453 @@
"""Parameter types for search space definition.

Provides explicit parameter classes for defining hyperparameter search spaces
with different distributions and constraints.

Author: Eshan Roy <eshanized@proton.me>
Organization: TONMOY INFRASTRUCTURE & VISION
"""

import random
from abc import ABC, abstractmethod
from typing import Any, List, Optional, Union

from morphml.exceptions import ValidationError
from morphml.logging_config import get_logger

logger = get_logger(__name__)


class Parameter(ABC):
    """
    Base class for all parameter types.

    Defines the interface for sampling and validating parameter values
    in the search space.
    """

    def __init__(self, name: str):
        """
        Initialize parameter.

        Args:
            name: Parameter name
        """
        self.name = name

    @abstractmethod
    def sample(self) -> Any:
        """
        Sample a value from parameter space.

        Returns:
            Sampled value
        """
        pass

    @abstractmethod
    def validate(self, value: Any) -> bool:
        """
        Check if value is valid for this parameter.

        Args:
            value: Value to validate

        Returns:
            True if valid, False otherwise
        """
        pass

    @abstractmethod
    def to_dict(self) -> dict:
        """Serialize parameter to dictionary."""
        pass

    def __repr__(self) -> str:
        """String representation."""
        return f"{self.__class__.__name__}(name={self.name})"


class CategoricalParameter(Parameter):
    """
    Categorical parameter with discrete choices.

    Samples uniformly from a list of possible values.

    Example:
        >>> param = CategoricalParameter('activation', ['relu', 'elu', 'gelu'])
        >>> value = param.sample()
        >>> print(value)  # One of: 'relu', 'elu', 'gelu'
    """

    def __init__(self, name: str, choices: List[Any], probabilities: Optional[List[float]] = None):
        """
        Initialize categorical parameter.

        Args:
            name: Parameter name
            choices: List of possible values
            probabilities: Optional probability weights for each choice

        Raises:
            ValidationError: If choices is empty or probabilities don't match
        """
        super().__init__(name)

        if not choices:
            raise ValidationError(f"Categorical parameter '{name}' must have at least one choice")

        if probabilities:
            if len(probabilities) != len(choices):
                raise ValidationError(
                    f"Probabilities length ({len(probabilities)}) must match choices length ({len(choices)})"
                )
            if abs(sum(probabilities) - 1.0) > 1e-6:
                raise ValidationError(f"Probabilities must sum to 1.0, got {sum(probabilities)}")

        self.choices = choices
        self.probabilities = probabilities

    def sample(self) -> Any:
        """Sample uniformly or with weights from choices."""
        if self.probabilities:
            return random.choices(self.choices, weights=self.probabilities, k=1)[0]
        return random.choice(self.choices)

    def validate(self, value: Any) -> bool:
        """Check if value is in choices."""
        return value in self.choices

    def to_dict(self) -> dict:
        """Serialize to dictionary."""
        return {
            "type": "categorical",
            "name": self.name,
            "choices": self.choices,
            "probabilities": self.probabilities,
        }

    def __repr__(self) -> str:
        """String representation."""
        return f"CategoricalParameter({self.name}, choices={self.choices})"


class IntegerParameter(Parameter):
    """
    Integer parameter with min/max bounds.

    Samples integers uniformly or log-uniformly from a range.

    Example:
        >>> param = IntegerParameter('filters', 32, 512, log_scale=True)
        >>> value = param.sample()
        >>> print(value)  # Integer between 32 and 512
    """

    def __init__(
        self,
        name: str,
        low: int,
        high: int,
        log_scale: bool = False,
        step: int = 1,
    ):
        """
        Initialize integer parameter.

        Args:
            name: Parameter name
            low: Minimum value (inclusive)
            high: Maximum value (inclusive)
            log_scale: If True, sample on log scale (good for powers of 2)
            step: Step size for sampling

        Raises:
            ValidationError: If low >= high
        """
        super().__init__(name)

        if low >= high:
            raise ValidationError(f"low ({low}) must be less than high ({high})")

        self.low = low
        self.high = high
        self.log_scale = log_scale
        self.step = step

    def sample(self) -> int:
        """Sample integer from range."""
        if self.log_scale:
            import math

            # Sample on log scale
            log_low = math.log2(self.low)
            log_high = math.log2(self.high)
            log_value = random.uniform(log_low, log_high)
            value = int(2**log_value)

            # Ensure within bounds
            value = max(self.low, min(self.high, value))
        else:
            # Linear sampling
            value = random.randint(self.low, self.high)

        # Apply step
        if self.step > 1:
            value = (value // self.step) * self.step

        return value

    def validate(self, value: Any) -> bool:
        """Check if value is valid integer in range."""
        if not isinstance(value, int):
            return False
        if value < self.low or value > self.high:
            return False
        if self.step > 1 and (value % self.step) != 0:
            return False
        return True

    def to_dict(self) -> dict:
        """Serialize to dictionary."""
        return {
            "type": "integer",
            "name": self.name,
            "low": self.low,
            "high": self.high,
            "log_scale": self.log_scale,
            "step": self.step,
        }

    def __repr__(self) -> str:
        """String representation."""
        scale = "log" if self.log_scale else "linear"
        return f"IntegerParameter({self.name}, [{self.low}, {self.high}], {scale})"


class FloatParameter(Parameter):
    """
    Float parameter with min/max bounds.

    Samples floats uniformly or log-uniformly from a range.

    Example:
        >>> param = FloatParameter('learning_rate', 1e-4, 1e-2, log_scale=True)
        >>> value = param.sample()
        >>> print(value)  # Float between 0.0001 and 0.01
    """

    def __init__(
        self,
        name: str,
        low: float,
        high: float,
        log_scale: bool = False,
    ):
        """
        Initialize float parameter.

        Args:
            name: Parameter name
            low: Minimum value
            high: Maximum value
            log_scale: If True, sample on log scale (good for learning rates)

        Raises:
            ValidationError: If low >= high
        """
        super().__init__(name)

        if low >= high:
            raise ValidationError(f"low ({low}) must be less than high ({high})")

        self.low = low
        self.high = high
        self.log_scale = log_scale

    def sample(self) -> float:
        """Sample float from range."""
        if self.log_scale:
            import math

            # Sample on log scale
            log_low = math.log10(self.low)
            log_high = math.log10(self.high)
            log_value = random.uniform(log_low, log_high)
            return 10**log_value
        else:
            # Linear sampling
            return random.uniform(self.low, self.high)

    def validate(self, value: Any) -> bool:
        """Check if value is valid float in range."""
        if not isinstance(value, (int, float)):
            return False
        return self.low <= value <= self.high

    def to_dict(self) -> dict:
        """Serialize to dictionary."""
        return {
            "type": "float",
            "name": self.name,
            "low": self.low,
            "high": self.high,
            "log_scale": self.log_scale,
        }

    def __repr__(self) -> str:
        """String representation."""
        scale = "log" if self.log_scale else "linear"
        return f"FloatParameter({self.name}, [{self.low}, {self.high}], {scale})"


class BooleanParameter(Parameter):
    """
    Boolean parameter.

    Samples True/False with configurable probability.

    Example:
        >>> param = BooleanParameter('use_dropout', probability=0.7)
        >>> value = param.sample()
        >>> print(value)  # True with 70% probability
    """

    def __init__(self, name: str, probability: float = 0.5):
        """
        Initialize boolean parameter.

        Args:
            name: Parameter name
            probability: Probability of sampling True (default 0.5)

        Raises:
            ValidationError: If probability not in [0, 1]
        """
        super().__init__(name)

        if not (0 <= probability <= 1):
            raise ValidationError(f"Probability must be in [0, 1], got {probability}")

        self.probability = probability

    def sample(self) -> bool:
        """Sample boolean value."""
        return random.random() < self.probability

    def validate(self, value: Any) -> bool:
        """Check if value is boolean."""
        return isinstance(value, bool)

    def to_dict(self) -> dict:
        """Serialize to dictionary."""
        return {
            "type": "boolean",
            "name": self.name,
            "probability": self.probability,
        }

    def __repr__(self) -> str:
        """String representation."""
        return f"BooleanParameter({self.name}, p={self.probability})"


class ConstantParameter(Parameter):
    """
    Constant parameter (always returns same value).

    Useful for fixed hyperparameters that shouldn't be searched.

    Example:
        >>> param = ConstantParameter('batch_size', 32)
        >>> value = param.sample()
        >>> print(value)  # Always 32
    """

    def __init__(self, name: str, value: Any):
        """
        Initialize constant parameter.

        Args:
            name: Parameter name
            value: Constant value
        """
        super().__init__(name)
        self.value = value

    def sample(self) -> Any:
        """Return constant value."""
        return self.value

    def validate(self, value: Any) -> bool:
        """Check if value equals constant."""
        return value == self.value

    def to_dict(self) -> dict:
        """Serialize to dictionary."""
        return {
            "type": "constant",
            "name": self.name,
            "value": self.value,
        }

    def __repr__(self) -> str:
        """String representation."""
        return f"ConstantParameter({self.name}={self.value})"


# Factory function for creating parameters from specification
def create_parameter(name: str, spec: Union[List, tuple, dict, Any]) -> Parameter:
    """
    Create parameter from specification.

    Args:
        name: Parameter name
        spec: Parameter specification (list of choices, tuple of (low, high), dict, or constant)

    Returns:
        Parameter instance

    Example:
        >>> p1 = create_parameter('activation', ['relu', 'elu'])
        >>> p2 = create_parameter('filters', (32, 512))
        >>> p3 = create_parameter('dropout_rate', (0.1, 0.5))
    """
    if isinstance(spec, list):
        # List of choices -> Categorical
        return CategoricalParameter(name, spec)

    elif isinstance(spec, tuple) and len(spec) == 2:
        low, high = spec
        if isinstance(low, int) and isinstance(high, int):
            # Integer range
            return IntegerParameter(name, low, high)
        else:
            # Float range
            return FloatParameter(name, float(low), float(high))

    elif isinstance(spec, dict):
        # Dictionary specification
        param_type = spec.get("type", "categorical")

        if param_type == "categorical":
            return CategoricalParameter(name, spec["choices"], spec.get("probabilities"))
        elif param_type == "integer":
            return IntegerParameter(
                name,
                spec["low"],
                spec["high"],
                spec.get("log_scale", False),
                spec.get("step", 1),
            )
        elif param_type == "float":
            return FloatParameter(name, spec["low"], spec["high"], spec.get("log_scale", False))
        elif param_type == "boolean":
            return BooleanParameter(name, spec.get("probability", 0.5))
        elif param_type == "constant":
            return ConstantParameter(name, spec["value"])
        else:
            raise ValidationError(f"Unknown parameter type: {param_type}")

    else:
        # Single value -> Constant
        return ConstantParameter(name, spec)
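The create_parameter factory maps shorthand specs to the classes above: a list becomes a CategoricalParameter, a two-element tuple becomes an Integer or Float parameter, a dict is fully explicit, and any other value becomes a ConstantParameter. The sketch below is illustrative only; the import path is assumed from the file listing (morphml/core/search/parameters.py) and the package may also re-export these names elsewhere.

# Illustrative sketch only; import path assumed from the file listing above.
from morphml.core.search.parameters import create_parameter

space = {
    "activation": create_parameter("activation", ["relu", "elu", "gelu"]),   # list -> categorical
    "filters": create_parameter("filters", (32, 512)),                       # int tuple -> integer
    "dropout_rate": create_parameter("dropout_rate", (0.1, 0.5)),            # float tuple -> float
    "optimizer": create_parameter("optimizer", "adam"),                      # bare value -> constant
    "learning_rate": create_parameter(
        "learning_rate", {"type": "float", "low": 1e-4, "high": 1e-2, "log_scale": True}
    ),                                                                        # dict -> explicit spec
}

# Draw one configuration and check it against the same parameter definitions.
config = {name: param.sample() for name, param in space.items()}
assert all(space[name].validate(value) for name, value in config.items())
print(config)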