oscura-0.6.0-py3-none-any.whl → oscura-0.8.0-py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (38)
  1. oscura/__init__.py +1 -1
  2. oscura/analyzers/eye/__init__.py +5 -1
  3. oscura/analyzers/eye/generation.py +501 -0
  4. oscura/analyzers/jitter/__init__.py +6 -6
  5. oscura/analyzers/jitter/timing.py +419 -0
  6. oscura/analyzers/patterns/__init__.py +28 -0
  7. oscura/analyzers/patterns/reverse_engineering.py +991 -0
  8. oscura/analyzers/power/__init__.py +35 -12
  9. oscura/analyzers/statistics/__init__.py +4 -0
  10. oscura/analyzers/statistics/basic.py +149 -0
  11. oscura/analyzers/statistics/correlation.py +47 -6
  12. oscura/analyzers/waveform/__init__.py +2 -0
  13. oscura/analyzers/waveform/measurements.py +145 -23
  14. oscura/analyzers/waveform/spectral.py +361 -8
  15. oscura/automotive/__init__.py +1 -1
  16. oscura/core/config/loader.py +0 -1
  17. oscura/core/types.py +108 -0
  18. oscura/loaders/__init__.py +12 -4
  19. oscura/loaders/tss.py +456 -0
  20. oscura/reporting/__init__.py +88 -1
  21. oscura/reporting/automation.py +348 -0
  22. oscura/reporting/citations.py +374 -0
  23. oscura/reporting/core.py +54 -0
  24. oscura/reporting/formatting/__init__.py +11 -0
  25. oscura/reporting/formatting/measurements.py +279 -0
  26. oscura/reporting/html.py +57 -0
  27. oscura/reporting/interpretation.py +431 -0
  28. oscura/reporting/summary.py +329 -0
  29. oscura/reporting/visualization.py +542 -0
  30. oscura/visualization/__init__.py +2 -1
  31. oscura/visualization/batch.py +521 -0
  32. oscura/workflows/__init__.py +2 -0
  33. oscura/workflows/waveform.py +783 -0
  34. {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/METADATA +37 -19
  35. {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/RECORD +38 -26
  36. {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/WHEEL +0 -0
  37. {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/entry_points.txt +0 -0
  38. {oscura-0.6.0.dist-info → oscura-0.8.0.dist-info}/licenses/LICENSE +0 -0
oscura/reporting/automation.py (new file)
@@ -0,0 +1,348 @@
+"""Automated analysis and anomaly detection for reports.
+
+This module provides automated interpretation of results, anomaly flagging,
+and intelligent recommendations generation.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+from oscura.reporting.interpretation import MeasurementInterpretation, interpret_measurement
+from oscura.reporting.summary import (
+    identify_key_findings,
+    recommendations_from_findings,
+    summarize_measurements,
+)
+
+
+def auto_interpret_results(results: dict[str, Any]) -> dict[str, MeasurementInterpretation]:
+    """Automatically interpret all results in a dictionary.
+
+    Args:
+        results: Dictionary of measurement results. Can be nested.
+
+    Returns:
+        Dictionary mapping measurement names to interpretations.
+
+    Example:
+        >>> results = {"snr": 45.2, "bandwidth": 1e9, "jitter": 50e-12}
+        >>> interpretations = auto_interpret_results(results)
+        >>> len(interpretations) >= 3
+        True
+    """
+    interpretations = {}
+
+    for key, value in results.items():
+        if isinstance(value, dict):
+            # Nested dictionary - recurse
+            if "value" in value:
+                # This is a measurement dict with value and metadata
+                meas_value = value["value"]
+                units = value.get("units", "")
+                spec_min = value.get("spec_min")
+                spec_max = value.get("spec_max")
+
+                interp = interpret_measurement(key, meas_value, units, spec_min, spec_max)
+                interpretations[key] = interp
+            else:
+                # Recurse into nested dict
+                nested = auto_interpret_results(value)
+                interpretations.update(nested)
+        elif isinstance(value, int | float):
+            # Simple numeric value
+            interp = interpret_measurement(key, value)
+            interpretations[key] = interp
+
+    return interpretations
+
+
+def generate_summary(
+    results: dict[str, Any],
+    include_recommendations: bool = True,
+) -> dict[str, Any]:
+    """Generate comprehensive automated summary of results.
+
+    Args:
+        results: Dictionary of measurement results.
+        include_recommendations: Whether to include recommendations.
+
+    Returns:
+        Summary dictionary with statistics, findings, and recommendations.
+
+    Example:
+        >>> results = {"snr": 45.2, "thd": -60.5, "bandwidth": 1e9}
+        >>> summary = generate_summary(results)
+        >>> "statistics" in summary
+        True
+        >>> "key_findings" in summary
+        True
+    """
+    # Flatten results to simple dict
+    flat_results = _flatten_results(results)
+
+    # Generate statistics
+    statistics = summarize_measurements(flat_results)
+
+    # Interpret results
+    interpretations = auto_interpret_results(results)
+
+    # Identify key findings
+    key_findings = identify_key_findings(flat_results, interpretations, max_findings=10)
+
+    # Generate recommendations
+    recommendations = []
+    if include_recommendations:
+        recommendations = recommendations_from_findings(flat_results, interpretations)
+
+    # Flag anomalies
+    anomalies = flag_anomalies(flat_results)
+
+    return {
+        "statistics": statistics,
+        "key_findings": key_findings,
+        "recommendations": recommendations,
+        "anomalies": anomalies,
+        "interpretations": {k: v.__dict__ for k, v in interpretations.items()},
+    }
+
+
+def flag_anomalies(
+    measurements: dict[str, Any],
+    threshold_std: float = 3.0,
+) -> list[dict[str, Any]]:
+    """Flag anomalous measurements using statistical analysis.
+
+    Args:
+        measurements: Dictionary of measurements.
+        threshold_std: Number of standard deviations for outlier detection.
+
+    Returns:
+        List of anomaly dictionaries with name, value, and reason.
+
+    Example:
+        >>> measurements = {"m1": 10, "m2": 11, "m3": 12, "m4": 100}
+        >>> anomalies = flag_anomalies(measurements, threshold_std=2.0)
+        >>> len(anomalies) >= 1
+        True
+    """
+    anomalies: list[dict[str, Any]] = []
+
+    # Extract numeric values
+    numeric_measurements = {k: v for k, v in measurements.items() if isinstance(v, int | float)}
+
+    if len(numeric_measurements) < 3:
+        # Not enough data for statistical analysis
+        return anomalies
+
+    values = np.array(list(numeric_measurements.values()))
+    mean = np.mean(values)
+    std = np.std(values)
+
+    if std == 0:
+        return anomalies
+
+    # Check each measurement
+    for name, value in numeric_measurements.items():
+        z_score = abs((value - mean) / std)
+
+        if z_score > threshold_std:
+            anomalies.append(
+                {
+                    "name": name,
+                    "value": value,
+                    "z_score": float(z_score),
+                    "reason": f"Value is {z_score:.1f} std devs from mean ({mean:.3e})",
+                    "severity": "high" if z_score > 5 else "medium",
+                }
+            )
+
+    # Domain-specific anomaly checks
+    for name, value in measurements.items():
+        if not isinstance(value, int | float):
+            continue
+
+        # Negative SNR
+        if "snr" in name.lower() and value < 0:
+            anomalies.append(
+                {
+                    "name": name,
+                    "value": value,
+                    "reason": "Negative SNR indicates signal weaker than noise",
+                    "severity": "critical",
+                }
+            )
+
+        # Negative bandwidth
+        if "bandwidth" in name.lower() and value <= 0:
+            anomalies.append(
+                {
+                    "name": name,
+                    "value": value,
+                    "reason": "Invalid negative or zero bandwidth",
+                    "severity": "critical",
+                }
+            )
+
+        # Extremely high jitter (>1 second)
+        if "jitter" in name.lower() and value > 1.0:
+            anomalies.append(
+                {
+                    "name": name,
+                    "value": value,
+                    "reason": "Unrealistically high jitter value (>1 second)",
+                    "severity": "critical",
+                }
+            )

+        # Invalid power factor (must be -1 to +1)
+        if "power_factor" in name.lower() and abs(value) > 1.0:
+            anomalies.append(
+                {
+                    "name": name,
+                    "value": value,
+                    "reason": "Power factor must be between -1 and +1",
+                    "severity": "critical",
+                }
+            )
+
+    return anomalies
+
+
+def suggest_follow_up_analyses(
+    measurements: dict[str, Any],
+    interpretations: dict[str, MeasurementInterpretation] | None = None,
+) -> list[str]:
+    """Suggest follow-up analyses based on measurement results.
+
+    Args:
+        measurements: Dictionary of measurements.
+        interpretations: Optional measurement interpretations.
+
+    Returns:
+        List of suggested analysis descriptions.
+
+    Example:
+        >>> measurements = {"snr": 15.5, "thd": -40}
+        >>> suggestions = suggest_follow_up_analyses(measurements)
+        >>> len(suggestions) > 0
+        True
+    """
+    suggestions = []
+
+    # Low SNR - suggest noise analysis
+    if "snr" in measurements:
+        snr = measurements["snr"]
+        if isinstance(snr, int | float) and snr < 30:
+            suggestions.append("Perform detailed noise analysis to identify noise sources")
+            suggestions.append("Analyze frequency spectrum for interference peaks")
+
+    # High THD - suggest harmonic analysis
+    if "thd" in measurements:
+        thd = measurements["thd"]
+        if isinstance(thd, int | float) and abs(thd) < 40:  # THD typically negative dB
+            suggestions.append("Perform harmonic analysis to identify distortion sources")
+            suggestions.append("Check for clipping or saturation in signal path")
+
+    # Jitter present - suggest jitter decomposition
+    if any("jitter" in k.lower() for k in measurements):
+        suggestions.append("Perform jitter decomposition (RJ, DJ, PJ) for root cause analysis")
+        suggestions.append("Analyze clock quality and investigate timing sources")
+
+    # Power measurements - suggest power quality
+    if any("power" in k.lower() for k in measurements):
+        suggestions.append("Perform power quality analysis (harmonics, flicker, sags/swells)")
+
+    # Eye diagram quality issues
+    if interpretations:
+        poor_quality = [
+            k for k, v in interpretations.items() if v.quality.value in ("marginal", "poor")
+        ]
+        if poor_quality:
+            suggestions.append(
+                f"Investigate {len(poor_quality)} marginal/poor measurements: "
+                f"{', '.join(poor_quality[:3])}"
+            )
+
+    return suggestions
+
+
+def identify_issues(
+    measurements: dict[str, Any],
+    anomalies: list[dict[str, Any]],
+) -> list[dict[str, Any]]:
+    """Identify issues from measurements and anomalies.
+
+    Args:
+        measurements: Dictionary of measurements.
+        anomalies: List of detected anomalies.
+
+    Returns:
+        List of issue dictionaries with severity and description.
+
+    Example:
+        >>> measurements = {"snr": 10}
+        >>> anomalies = [{"name": "snr", "value": 10, "severity": "high"}]
+        >>> issues = identify_issues(measurements, anomalies)
+        >>> len(issues) > 0
+        True
+    """
+    issues = []
+
+    # Add critical anomalies as issues
+    for anomaly in anomalies:
+        if anomaly.get("severity") == "critical":
+            issues.append(
+                {
+                    "severity": "critical",
+                    "measurement": anomaly["name"],
+                    "description": anomaly["reason"],
+                    "value": anomaly["value"],
+                }
+            )
+
+    # Domain-specific issue detection
+    if "snr" in measurements:
+        snr = measurements["snr"]
+        if isinstance(snr, int | float) and snr < 20:
+            issues.append(
+                {
+                    "severity": "high",
+                    "measurement": "snr",
+                    "description": f"Low SNR ({snr:.1f} dB) may impact measurement accuracy",
+                    "value": snr,
+                }
+            )
+
+    if "bandwidth" in measurements:
+        bw = measurements["bandwidth"]
+        if isinstance(bw, int | float) and bw < 10e6:
+            issues.append(
+                {
+                    "severity": "medium",
+                    "measurement": "bandwidth",
+                    "description": f"Limited bandwidth ({bw / 1e6:.1f} MHz) may restrict signal fidelity",
+                    "value": bw,
+                }
+            )
+
+    return issues
+
+
+def _flatten_results(results: dict[str, Any]) -> dict[str, Any]:
+    """Flatten nested results dictionary."""
+    flat = {}
+
+    for key, value in results.items():
+        if isinstance(value, dict):
+            if "value" in value:
+                flat[key] = value["value"]
+            else:
+                nested = _flatten_results(value)
+                flat.update(nested)
+        elif isinstance(value, int | float | str):
+            flat[key] = value
+
+    return flat
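
For orientation, the sketch below exercises the new automation helpers shown in the hunk above. It is a minimal, hypothetical usage example only: it assumes the hunk is oscura/reporting/automation.py (the only new file with +348 lines in this release), that oscura 0.8.0 is installed, and the measurement names and values are illustrative rather than taken from a real capture.

    # Minimal usage sketch of the 0.8.0 reporting automation helpers.
    # Assumption: the module path is oscura.reporting.automation; values below are made up.
    from oscura.reporting.automation import (
        flag_anomalies,
        generate_summary,
        suggest_follow_up_analyses,
    )

    results = {"snr": 18.4, "thd": -35.0, "bandwidth": 5e6, "jitter": 12e-12}

    summary = generate_summary(results)        # statistics, key_findings, recommendations, anomalies
    outliers = flag_anomalies(results, threshold_std=2.0)
    follow_ups = suggest_follow_up_analyses(results)

    print(summary["key_findings"])
    print(follow_ups)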