morphml-1.0.0-py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of morphml might be problematic.
- morphml/__init__.py +14 -0
- morphml/api/__init__.py +26 -0
- morphml/api/app.py +326 -0
- morphml/api/auth.py +193 -0
- morphml/api/client.py +338 -0
- morphml/api/models.py +132 -0
- morphml/api/rate_limit.py +192 -0
- morphml/benchmarking/__init__.py +36 -0
- morphml/benchmarking/comparison.py +430 -0
- morphml/benchmarks/__init__.py +56 -0
- morphml/benchmarks/comparator.py +409 -0
- morphml/benchmarks/datasets.py +280 -0
- morphml/benchmarks/metrics.py +199 -0
- morphml/benchmarks/openml_suite.py +201 -0
- morphml/benchmarks/problems.py +289 -0
- morphml/benchmarks/suite.py +318 -0
- morphml/cli/__init__.py +5 -0
- morphml/cli/commands/experiment.py +329 -0
- morphml/cli/main.py +457 -0
- morphml/cli/quickstart.py +312 -0
- morphml/config.py +278 -0
- morphml/constraints/__init__.py +19 -0
- morphml/constraints/handler.py +205 -0
- morphml/constraints/predicates.py +285 -0
- morphml/core/__init__.py +3 -0
- morphml/core/crossover.py +449 -0
- morphml/core/dsl/README.md +359 -0
- morphml/core/dsl/__init__.py +72 -0
- morphml/core/dsl/ast_nodes.py +364 -0
- morphml/core/dsl/compiler.py +318 -0
- morphml/core/dsl/layers.py +368 -0
- morphml/core/dsl/lexer.py +336 -0
- morphml/core/dsl/parser.py +455 -0
- morphml/core/dsl/search_space.py +386 -0
- morphml/core/dsl/syntax.py +199 -0
- morphml/core/dsl/type_system.py +361 -0
- morphml/core/dsl/validator.py +386 -0
- morphml/core/graph/__init__.py +40 -0
- morphml/core/graph/edge.py +124 -0
- morphml/core/graph/graph.py +507 -0
- morphml/core/graph/mutations.py +409 -0
- morphml/core/graph/node.py +196 -0
- morphml/core/graph/serialization.py +361 -0
- morphml/core/graph/visualization.py +431 -0
- morphml/core/objectives/__init__.py +20 -0
- morphml/core/search/__init__.py +33 -0
- morphml/core/search/individual.py +252 -0
- morphml/core/search/parameters.py +453 -0
- morphml/core/search/population.py +375 -0
- morphml/core/search/search_engine.py +340 -0
- morphml/distributed/__init__.py +76 -0
- morphml/distributed/fault_tolerance.py +497 -0
- morphml/distributed/health_monitor.py +348 -0
- morphml/distributed/master.py +709 -0
- morphml/distributed/proto/README.md +224 -0
- morphml/distributed/proto/__init__.py +74 -0
- morphml/distributed/proto/worker.proto +170 -0
- morphml/distributed/proto/worker_pb2.py +79 -0
- morphml/distributed/proto/worker_pb2_grpc.py +423 -0
- morphml/distributed/resource_manager.py +416 -0
- morphml/distributed/scheduler.py +567 -0
- morphml/distributed/storage/__init__.py +33 -0
- morphml/distributed/storage/artifacts.py +381 -0
- morphml/distributed/storage/cache.py +366 -0
- morphml/distributed/storage/checkpointing.py +329 -0
- morphml/distributed/storage/database.py +459 -0
- morphml/distributed/worker.py +549 -0
- morphml/evaluation/__init__.py +5 -0
- morphml/evaluation/heuristic.py +237 -0
- morphml/exceptions.py +55 -0
- morphml/execution/__init__.py +5 -0
- morphml/execution/local_executor.py +350 -0
- morphml/integrations/__init__.py +28 -0
- morphml/integrations/jax_adapter.py +206 -0
- morphml/integrations/pytorch_adapter.py +530 -0
- morphml/integrations/sklearn_adapter.py +206 -0
- morphml/integrations/tensorflow_adapter.py +230 -0
- morphml/logging_config.py +93 -0
- morphml/meta_learning/__init__.py +66 -0
- morphml/meta_learning/architecture_similarity.py +277 -0
- morphml/meta_learning/experiment_database.py +240 -0
- morphml/meta_learning/knowledge_base/__init__.py +19 -0
- morphml/meta_learning/knowledge_base/embedder.py +179 -0
- morphml/meta_learning/knowledge_base/knowledge_base.py +313 -0
- morphml/meta_learning/knowledge_base/meta_features.py +265 -0
- morphml/meta_learning/knowledge_base/vector_store.py +271 -0
- morphml/meta_learning/predictors/__init__.py +27 -0
- morphml/meta_learning/predictors/ensemble.py +221 -0
- morphml/meta_learning/predictors/gnn_predictor.py +552 -0
- morphml/meta_learning/predictors/learning_curve.py +231 -0
- morphml/meta_learning/predictors/proxy_metrics.py +261 -0
- morphml/meta_learning/strategy_evolution/__init__.py +27 -0
- morphml/meta_learning/strategy_evolution/adaptive_optimizer.py +226 -0
- morphml/meta_learning/strategy_evolution/bandit.py +276 -0
- morphml/meta_learning/strategy_evolution/portfolio.py +230 -0
- morphml/meta_learning/transfer.py +581 -0
- morphml/meta_learning/warm_start.py +286 -0
- morphml/optimizers/__init__.py +74 -0
- morphml/optimizers/adaptive_operators.py +399 -0
- morphml/optimizers/bayesian/__init__.py +52 -0
- morphml/optimizers/bayesian/acquisition.py +387 -0
- morphml/optimizers/bayesian/base.py +319 -0
- morphml/optimizers/bayesian/gaussian_process.py +635 -0
- morphml/optimizers/bayesian/smac.py +534 -0
- morphml/optimizers/bayesian/tpe.py +411 -0
- morphml/optimizers/differential_evolution.py +220 -0
- morphml/optimizers/evolutionary/__init__.py +61 -0
- morphml/optimizers/evolutionary/cma_es.py +416 -0
- morphml/optimizers/evolutionary/differential_evolution.py +556 -0
- morphml/optimizers/evolutionary/encoding.py +426 -0
- morphml/optimizers/evolutionary/particle_swarm.py +449 -0
- morphml/optimizers/genetic_algorithm.py +486 -0
- morphml/optimizers/gradient_based/__init__.py +22 -0
- morphml/optimizers/gradient_based/darts.py +550 -0
- morphml/optimizers/gradient_based/enas.py +585 -0
- morphml/optimizers/gradient_based/operations.py +474 -0
- morphml/optimizers/gradient_based/utils.py +601 -0
- morphml/optimizers/hill_climbing.py +169 -0
- morphml/optimizers/multi_objective/__init__.py +56 -0
- morphml/optimizers/multi_objective/indicators.py +504 -0
- morphml/optimizers/multi_objective/nsga2.py +647 -0
- morphml/optimizers/multi_objective/visualization.py +427 -0
- morphml/optimizers/nsga2.py +308 -0
- morphml/optimizers/random_search.py +172 -0
- morphml/optimizers/simulated_annealing.py +181 -0
- morphml/plugins/__init__.py +35 -0
- morphml/plugins/custom_evaluator_example.py +81 -0
- morphml/plugins/custom_optimizer_example.py +63 -0
- morphml/plugins/plugin_system.py +454 -0
- morphml/reports/__init__.py +30 -0
- morphml/reports/generator.py +362 -0
- morphml/tracking/__init__.py +7 -0
- morphml/tracking/experiment.py +309 -0
- morphml/tracking/logger.py +301 -0
- morphml/tracking/reporter.py +357 -0
- morphml/utils/__init__.py +6 -0
- morphml/utils/checkpoint.py +189 -0
- morphml/utils/comparison.py +390 -0
- morphml/utils/export.py +407 -0
- morphml/utils/progress.py +392 -0
- morphml/utils/validation.py +392 -0
- morphml/version.py +7 -0
- morphml/visualization/__init__.py +50 -0
- morphml/visualization/analytics.py +423 -0
- morphml/visualization/architecture_diagrams.py +353 -0
- morphml/visualization/architecture_plot.py +223 -0
- morphml/visualization/convergence_plot.py +174 -0
- morphml/visualization/crossover_viz.py +386 -0
- morphml/visualization/graph_viz.py +338 -0
- morphml/visualization/pareto_plot.py +149 -0
- morphml/visualization/plotly_dashboards.py +422 -0
- morphml/visualization/population.py +309 -0
- morphml/visualization/progress.py +260 -0
- morphml-1.0.0.dist-info/METADATA +434 -0
- morphml-1.0.0.dist-info/RECORD +158 -0
- morphml-1.0.0.dist-info/WHEEL +4 -0
- morphml-1.0.0.dist-info/entry_points.txt +3 -0
- morphml-1.0.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,423 @@
+"""Performance analytics for MorphML experiments.
+
+Statistical analysis and reporting of NAS results.
+
+Example:
+    >>> from morphml.visualization.analytics import PerformanceAnalytics
+    >>> analytics = PerformanceAnalytics()
+    >>> report = analytics.analyze_experiment(history)
+    >>> print(report['summary'])
+"""
+
+from typing import Any, Dict, List, Optional
+
+import numpy as np
+from scipy import stats
+
+from morphml.logging_config import get_logger
+
+logger = get_logger(__name__)
+
+
+class PerformanceAnalytics:
+    """
+    Statistical analysis of NAS experiment results.
+
+    Provides comprehensive analytics including:
+    - Descriptive statistics
+    - Convergence analysis
+    - Diversity metrics
+    - Statistical significance tests
+
+    Example:
+        >>> analytics = PerformanceAnalytics()
+        >>> report = analytics.analyze_experiment(optimizer.history)
+        >>> print(f"Best: {report['best']:.4f}")
+        >>> print(f"Mean: {report['mean']:.4f}")
+    """
+
+    def __init__(self):
+        """Initialize analytics engine."""
+        logger.info("Initialized PerformanceAnalytics")
+
+    def analyze_experiment(self, history: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Comprehensive experiment analysis.
+
+        Args:
+            history: Optimization history with keys:
+                - best_fitness: List of best fitness per generation
+                - mean_fitness: List of mean fitness per generation
+                - population_fitness: List of lists of fitness values
+                - diversity: List of diversity scores
+                - architectures: List of architecture dicts
+
+        Returns:
+            Analysis report dictionary
+
+        Example:
+            >>> report = analytics.analyze_experiment(history)
+            >>> print(report['summary'])
+        """
+        best_fitness = history.get("best_fitness", [])
+        mean_fitness = history.get("mean_fitness", [])
+        all_fitness = []
+
+        for pop in history.get("population_fitness", []):
+            all_fitness.extend(pop)
+
+        if not all_fitness:
+            all_fitness = best_fitness
+
+        report = {
+            "summary": self._compute_summary_statistics(all_fitness),
+            "convergence": self._analyze_convergence(best_fitness),
+            "diversity": self._analyze_diversity(history.get("diversity", [])),
+            "efficiency": self._analyze_efficiency(best_fitness, mean_fitness),
+            "architectures": self._analyze_architectures(history.get("architectures", [])),
+        }
+
+        return report
+
+    def _compute_summary_statistics(self, fitness_values: List[float]) -> Dict[str, float]:
+        """
+        Compute summary statistics.
+
+        Args:
+            fitness_values: List of fitness values
+
+        Returns:
+            Dictionary of statistics
+        """
+        if not fitness_values:
+            return {}
+
+        fitness_array = np.array(fitness_values)
+
+        return {
+            "best": float(np.max(fitness_array)),
+            "worst": float(np.min(fitness_array)),
+            "mean": float(np.mean(fitness_array)),
+            "median": float(np.median(fitness_array)),
+            "std": float(np.std(fitness_array)),
+            "variance": float(np.var(fitness_array)),
+            "q25": float(np.percentile(fitness_array, 25)),
+            "q75": float(np.percentile(fitness_array, 75)),
+            "iqr": float(np.percentile(fitness_array, 75) - np.percentile(fitness_array, 25)),
+            "count": len(fitness_values),
+        }
+
+    def _analyze_convergence(self, best_fitness: List[float]) -> Dict[str, Any]:
+        """
+        Analyze convergence behavior.
+
+        Args:
+            best_fitness: List of best fitness per generation
+
+        Returns:
+            Convergence analysis
+        """
+        if len(best_fitness) < 2:
+            return {}
+
+        fitness_array = np.array(best_fitness)
+        generations = np.arange(len(best_fitness))
+
+        # Compute improvement rate
+        improvements = np.diff(fitness_array)
+        improvement_rate = (
+            float(np.mean(improvements[improvements > 0]))
+            if len(improvements[improvements > 0]) > 0
+            else 0.0
+        )
+
+        # Detect convergence point (when improvement < threshold)
+        threshold = 0.001
+        converged_at = None
+        for i in range(len(improvements)):
+            if all(abs(improvements[i : i + 5]) < threshold):
+                converged_at = i
+                break
+
+        # Fit linear trend
+        if len(best_fitness) > 1:
+            slope, intercept, r_value, p_value, std_err = stats.linregress(
+                generations, fitness_array
+            )
+        else:
+            slope, r_value = 0.0, 0.0
+
+        return {
+            "converged": converged_at is not None,
+            "converged_at_generation": converged_at,
+            "improvement_rate": improvement_rate,
+            "total_improvement": float(fitness_array[-1] - fitness_array[0]),
+            "trend_slope": float(slope),
+            "trend_r_squared": float(r_value**2),
+            "final_fitness": float(fitness_array[-1]),
+            "generations": len(best_fitness),
+        }
+
+    def _analyze_diversity(self, diversity_scores: List[float]) -> Dict[str, Any]:
+        """
+        Analyze population diversity.
+
+        Args:
+            diversity_scores: List of diversity scores per generation
+
+        Returns:
+            Diversity analysis
+        """
+        if not diversity_scores:
+            return {}
+
+        diversity_array = np.array(diversity_scores)
+
+        # Detect diversity collapse (when diversity drops below threshold)
+        threshold = 0.1
+        collapsed = diversity_array[-1] < threshold if len(diversity_array) > 0 else False
+
+        return {
+            "initial_diversity": float(diversity_array[0]) if len(diversity_array) > 0 else 0.0,
+            "final_diversity": float(diversity_array[-1]) if len(diversity_array) > 0 else 0.0,
+            "mean_diversity": float(np.mean(diversity_array)),
+            "min_diversity": float(np.min(diversity_array)),
+            "max_diversity": float(np.max(diversity_array)),
+            "diversity_collapsed": collapsed,
+            "diversity_trend": "decreasing"
+            if len(diversity_array) > 1 and diversity_array[-1] < diversity_array[0]
+            else "stable",
+        }
+
+    def _analyze_efficiency(
+        self, best_fitness: List[float], mean_fitness: List[float]
+    ) -> Dict[str, Any]:
+        """
+        Analyze search efficiency.
+
+        Args:
+            best_fitness: Best fitness per generation
+            mean_fitness: Mean fitness per generation
+
+        Returns:
+            Efficiency metrics
+        """
+        if not best_fitness or not mean_fitness:
+            return {}
+
+        best_array = np.array(best_fitness)
+        mean_array = np.array(mean_fitness)
+
+        # Compute selection pressure (best vs mean)
+        selection_pressure = best_array - mean_array
+
+        # Compute exploitation vs exploration balance
+        # High selection pressure = more exploitation
+        avg_pressure = float(np.mean(selection_pressure))
+
+        return {
+            "average_selection_pressure": avg_pressure,
+            "exploitation_score": min(1.0, avg_pressure / 0.5),  # Normalized
+            "exploration_score": max(0.0, 1.0 - avg_pressure / 0.5),
+            "efficiency_ratio": float(best_array[-1] / (len(best_fitness) * 0.01))
+            if len(best_fitness) > 0
+            else 0.0,
+        }
+
+    def _analyze_architectures(self, architectures: List[Dict[str, Any]]) -> Dict[str, Any]:
+        """
+        Analyze architecture characteristics.
+
+        Args:
+            architectures: List of architecture dicts
+
+        Returns:
+            Architecture analysis
+        """
+        if not architectures:
+            return {}
+
+        # Extract metrics
+        parameters = [a.get("parameters", 0) for a in architectures]
+        depths = [a.get("depth", 0) for a in architectures]
+        widths = [a.get("width", 0) for a in architectures]
+        fitness = [a.get("fitness", 0) for a in architectures]
+
+        # Compute correlations
+        param_fitness_corr = (
+            float(np.corrcoef(parameters, fitness)[0, 1]) if len(parameters) > 1 else 0.0
+        )
+        depth_fitness_corr = float(np.corrcoef(depths, fitness)[0, 1]) if len(depths) > 1 else 0.0
+
+        return {
+            "count": len(architectures),
+            "parameters": {
+                "mean": float(np.mean(parameters)),
+                "min": float(np.min(parameters)),
+                "max": float(np.max(parameters)),
+                "std": float(np.std(parameters)),
+            },
+            "depth": {
+                "mean": float(np.mean(depths)),
+                "min": float(np.min(depths)),
+                "max": float(np.max(depths)),
+            },
+            "width": {
+                "mean": float(np.mean(widths)),
+                "min": float(np.min(widths)),
+                "max": float(np.max(widths)),
+            },
+            "correlations": {
+                "parameters_fitness": param_fitness_corr,
+                "depth_fitness": depth_fitness_corr,
+            },
+        }
+
+    def compare_experiments(self, experiments: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
+        """
+        Compare multiple experiments statistically.
+
+        Args:
+            experiments: Dict mapping experiment names to history dicts
+
+        Returns:
+            Comparison report
+
+        Example:
+            >>> comparison = analytics.compare_experiments({
+            ...     'GA': ga_history,
+            ...     'Random': random_history
+            ... })
+        """
+        results = {}
+
+        for name, history in experiments.items():
+            best_fitness = history.get("best_fitness", [])
+            if best_fitness:
+                results[name] = {
+                    "final_fitness": best_fitness[-1],
+                    "mean_fitness": np.mean(best_fitness),
+                    "convergence_speed": self._compute_convergence_speed(best_fitness),
+                }
+
+        # Statistical tests
+        if len(results) >= 2:
+            names = list(results.keys())
+            fitness_lists = [experiments[name].get("best_fitness", []) for name in names]
+
+            # Perform t-test between first two
+            if len(fitness_lists[0]) > 1 and len(fitness_lists[1]) > 1:
+                t_stat, p_value = stats.ttest_ind(fitness_lists[0], fitness_lists[1])
+                results["statistical_test"] = {
+                    "test": "t-test",
+                    "comparison": f"{names[0]} vs {names[1]}",
+                    "t_statistic": float(t_stat),
+                    "p_value": float(p_value),
+                    "significant": p_value < 0.05,
+                }
+
+        return results
+
+    def _compute_convergence_speed(self, best_fitness: List[float]) -> float:
+        """
+        Compute convergence speed metric.
+
+        Args:
+            best_fitness: Best fitness per generation
+
+        Returns:
+            Convergence speed score
+        """
+        if len(best_fitness) < 2:
+            return 0.0
+
+        # Compute area under curve (higher = faster convergence)
+        fitness_array = np.array(best_fitness)
+        normalized = (fitness_array - fitness_array[0]) / (
+            fitness_array[-1] - fitness_array[0] + 1e-10
+        )
+        auc = float(np.trapz(normalized))
+
+        return auc / len(best_fitness)
+
+    def generate_report(self, history: Dict[str, Any], output_path: Optional[str] = None) -> str:
+        """
+        Generate human-readable analysis report.
+
+        Args:
+            history: Optimization history
+            output_path: Optional file path to save report
+
+        Returns:
+            Report string
+
+        Example:
+            >>> report = analytics.generate_report(history, "report.txt")
+            >>> print(report)
+        """
+        analysis = self.analyze_experiment(history)
+
+        report_lines = [
+            "=" * 60,
+            "MORPHML EXPERIMENT ANALYSIS REPORT",
+            "=" * 60,
+            "",
+            "SUMMARY STATISTICS",
+            "-" * 60,
+        ]
+
+        summary = analysis.get("summary", {})
+        if summary:
+            report_lines.extend(
+                [
+                    f"Best Fitness: {summary.get('best', 0):.6f}",
+                    f"Worst Fitness: {summary.get('worst', 0):.6f}",
+                    f"Mean Fitness: {summary.get('mean', 0):.6f}",
+                    f"Median Fitness: {summary.get('median', 0):.6f}",
+                    f"Std Deviation: {summary.get('std', 0):.6f}",
+                    f"IQR: {summary.get('iqr', 0):.6f}",
+                    f"Total Evaluated: {summary.get('count', 0)}",
+                    "",
+                ]
+            )
+
+        convergence = analysis.get("convergence", {})
+        if convergence:
+            report_lines.extend(
+                [
+                    "CONVERGENCE ANALYSIS",
+                    "-" * 60,
+                    f"Converged: {convergence.get('converged', False)}",
+                    f"Converged at Gen: {convergence.get('converged_at_generation', 'N/A')}",
+                    f"Total Improvement: {convergence.get('total_improvement', 0):.6f}",
+                    f"Improvement Rate: {convergence.get('improvement_rate', 0):.6f}",
+                    f"Trend R²: {convergence.get('trend_r_squared', 0):.4f}",
+                    "",
+                ]
+            )
+
+        diversity = analysis.get("diversity", {})
+        if diversity:
+            report_lines.extend(
+                [
+                    "DIVERSITY ANALYSIS",
+                    "-" * 60,
+                    f"Initial Diversity: {diversity.get('initial_diversity', 0):.4f}",
+                    f"Final Diversity: {diversity.get('final_diversity', 0):.4f}",
+                    f"Mean Diversity: {diversity.get('mean_diversity', 0):.4f}",
+                    f"Diversity Trend: {diversity.get('diversity_trend', 'N/A')}",
+                    f"Collapsed: {diversity.get('diversity_collapsed', False)}",
+                    "",
+                ]
+            )
+
+        report_lines.append("=" * 60)
+
+        report = "\n".join(report_lines)
+
+        if output_path:
+            with open(output_path, "w") as f:
+                f.write(report)
+            logger.info(f"Saved report to {output_path}")
+
+        return report
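For orientation, a minimal usage sketch pieced together from the docstring examples in the diff above; the history dict follows the keys that analyze_experiment documents, and every value below is illustrative rather than taken from a real run:

    from morphml.visualization.analytics import PerformanceAnalytics

    # Illustrative optimizer history (made-up numbers, shaped after the documented keys).
    history = {
        "best_fitness": [0.60, 0.68, 0.74, 0.78, 0.79],
        "mean_fitness": [0.50, 0.58, 0.65, 0.70, 0.72],
        "population_fitness": [[0.45, 0.55, 0.60], [0.50, 0.60, 0.64]],
        "diversity": [0.90, 0.70, 0.55, 0.45, 0.40],
        "architectures": [
            {"parameters": 1_200_000, "depth": 12, "width": 64, "fitness": 0.74},
            {"parameters": 2_500_000, "depth": 18, "width": 128, "fitness": 0.78},
        ],
    }

    analytics = PerformanceAnalytics()
    report = analytics.analyze_experiment(history)
    print(report["summary"]["best"], report["convergence"]["converged"])
    print(analytics.generate_report(history))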