sgn-drift 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sgn_drift-0.1.0.dist-info/METADATA +91 -0
- sgn_drift-0.1.0.dist-info/RECORD +22 -0
- sgn_drift-0.1.0.dist-info/WHEEL +5 -0
- sgn_drift-0.1.0.dist-info/entry_points.txt +7 -0
- sgn_drift-0.1.0.dist-info/top_level.txt +1 -0
- sgndrift/__init__.py +0 -0
- sgndrift/_version.py +34 -0
- sgndrift/bin/__init__.py +0 -0
- sgndrift/bin/estimate_drift.py +278 -0
- sgndrift/bin/plot_drift.py +177 -0
- sgndrift/bin/plot_drift_comparison.py +211 -0
- sgndrift/bin/plot_drift_super.py +272 -0
- sgndrift/bin/plot_drift_super_comp.py +360 -0
- sgndrift/bin/plot_drift_time.py +210 -0
- sgndrift/psd/__init__.py +0 -0
- sgndrift/psd/drift.py +73 -0
- sgndrift/psd/estimators.py +150 -0
- sgndrift/sinks/__init__.py +0 -0
- sgndrift/sinks/drift_sink.py +154 -0
- sgndrift/transforms/__init__.py +0 -0
- sgndrift/transforms/drift.py +145 -0
- sgndrift/transforms/psd.py +190 -0
|
@@ -0,0 +1,360 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Double Super Drift Visualization Tool.
|
|
3
|
+
|
|
4
|
+
Generates a side-by-side comparison dashboard for TWO datasets.
|
|
5
|
+
Each side features:
|
|
6
|
+
1. Top: Phase Space Scatter Plot.
|
|
7
|
+
2. Bottom: Stacked Time-Series Strip Chart.
|
|
8
|
+
|
|
9
|
+
Crucially, this script locks the scales (X, Y, Color, and TS-Amplitude)
|
|
10
|
+
across both datasets to allow rigorous visual comparison of Laminar vs Turbulent regimes.
|
|
11
|
+
|
|
12
|
+
Usage:
|
|
13
|
+
sgn-drift-plot-super-multi fileA.csv fileB.csv --label-a "Safe" --label-b "Scattering"
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import argparse
|
|
17
|
+
import sys
|
|
18
|
+
from typing import Optional, List, Tuple
|
|
19
|
+
|
|
20
|
+
import matplotlib.pyplot as plt
|
|
21
|
+
from matplotlib.colors import LogNorm, Normalize
|
|
22
|
+
import pandas as pd
|
|
23
|
+
import numpy as np
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def load_data(
    filepath: str, start: Optional[float] = None, end: Optional[float] = None
) -> pd.DataFrame:
    """Loads drift data from CSV and applies time filtering.

    Unlike the strict loader in other tools, an empty result after
    filtering is NOT fatal here: the caller renders a "No Data"
    placeholder panel instead.
    """
    try:
        frame = pd.read_csv(filepath)
    except FileNotFoundError:
        print(f"Error: File not found at {filepath}")
        sys.exit(1)
    except Exception as e:
        print(f"Error reading CSV: {e}")
        sys.exit(1)

    # Normalize header names (strip stray whitespace around column names).
    frame.columns = frame.columns.str.strip()
    if "time" not in frame.columns:
        print(f"Error: {filepath} must contain a 'time' column.")
        sys.exit(1)

    # Restrict to the requested GPS window (inclusive on both ends).
    if start is not None:
        frame = frame[frame["time"] >= start]
    if end is not None:
        frame = frame[frame["time"] <= end]

    if frame.empty:
        # Warn but return the empty frame so the caller can draw a placeholder.
        print(f"Warning: No data found in {filepath} for the specified time range.")

    return frame
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def get_color_values_and_meta(
    df: pd.DataFrame, color_col: Optional[str]
) -> Tuple[np.ndarray, str, str]:
    """
    Extracts raw values used to color the scatter points.

    Does NOT normalize (normalization happens globally later so both
    datasets share one color scale).

    Args:
        df: Drift data frame containing a 'time' column.
        color_col: Column to color by: "time" (relative seconds), a band
            column (Log10-transformed), or None/"" to disable coloring.

    Returns:
        Tuple of (values, colorbar label, colormap name). All three are
        empty (empty array, "", "") when coloring is disabled, the frame
        is empty, or the column is missing.
    """
    if df.empty or not color_col:
        return np.array([]), "", ""

    if color_col.lower() == "time":
        # Time is relative to the start of THIS dataset
        t_min = df["time"].min()
        values = (df["time"] - t_min).to_numpy()
        label = "Time (s from start)"
        cmap = "turbo"
    elif color_col in df.columns:
        # Bands: Log10 transform.
        # BUG FIX: cast to float (with an explicit copy so the source
        # frame is never mutated). The previous plain .copy() kept the
        # column's dtype, and assigning NaN into an integer array raises
        # "ValueError: cannot convert float NaN to integer".
        raw = df[color_col].to_numpy(dtype=float, copy=True)
        # Mask <= 0 (log10 undefined there)
        raw[raw <= 0] = np.nan
        values = np.log10(raw)
        label = f"{color_col} Magnitude (Log10)"
        cmap = "plasma"
    else:
        print(f"Warning: Color column '{color_col}' not found.")
        return np.array([]), "", ""

    return values, label, cmap
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def make_super_multi_plot(
    df_a: pd.DataFrame,
    df_b: pd.DataFrame,
    label_a: str,
    label_b: str,
    x_col: str,
    y_col: str,
    ts_bands: List[str],
    output_path: str,
    color_col: Optional[str] = None,
    log_scale: bool = True,
    title: Optional[str] = None,
):
    """Generates the side-by-side dashboard.

    Layout: one column per dataset. Each column has a phase-space scatter
    (``x_col`` vs ``y_col``) on top and a stack of per-band time-series
    strips below. X/Y limits, color normalization and (per band) the
    time-series Y ranges are locked across the two columns so the two
    regimes can be compared rigorously.

    Args:
        df_a, df_b: Drift data frames (either may be empty; an empty one
            gets a "No Data" placeholder panel).
        label_a, label_b: Titles for the left / right columns.
        x_col, y_col: Band columns used for the scatter axes.
        ts_bands: Band columns drawn as stacked time-series strips.
        output_path: Filename the figure is saved to.
        color_col: Column to color scatter points by ("time" or a band
            name); None disables coloring.
        log_scale: Use log axes for the scatter and the TS Y-axes.
        title: Optional figure suptitle.
    """

    # --- 1. Global Scale Calculations ---

    # A. Scatter axis limits (X/Y), pooled over both datasets.
    concat_x = []
    concat_y = []
    if not df_a.empty:
        concat_x.append(df_a[x_col])
        concat_y.append(df_a[y_col])
    if not df_b.empty:
        concat_x.append(df_b[x_col])
        concat_y.append(df_b[y_col])

    all_x = pd.concat(concat_x) if concat_x else pd.Series(dtype=float)
    all_y = pd.concat(concat_y) if concat_y else pd.Series(dtype=float)

    if not all_x.empty:
        if log_scale:
            # Log axes cannot display non-positive values; fall back to a
            # default decade when a column has no positive entries.
            valid_x = all_x[all_x > 0]
            valid_y = all_y[all_y > 0]
            x_min = valid_x.min() if not valid_x.empty else 0.1
            x_max = valid_x.max() if not valid_x.empty else 10.0
            y_min = valid_y.min() if not valid_y.empty else 0.1
            y_max = valid_y.max() if not valid_y.empty else 10.0
        else:
            x_min, x_max = all_x.min(), all_x.max()
            y_min, y_max = all_y.min(), all_y.max()
    else:
        x_min, x_max, y_min, y_max = 0.1, 10, 0.1, 10

    # B. Color values + shared normalization.
    # BUG FIX: take the colormap/label from whichever dataset actually
    # produced values. Previously they came only from dataset A, so an
    # empty df_a left cmap == "" and the scatter call for df_b (and the
    # colorbar) raised on the invalid colormap name.
    c_a, lbl_a, cmap_a = get_color_values_and_meta(df_a, color_col)
    c_b, lbl_b, cmap_b = get_color_values_and_meta(df_b, color_col)
    c_lbl = lbl_a or lbl_b
    cmap = cmap_a or cmap_b

    norm = None
    if color_col:
        c_all = np.concatenate([c_a, c_b])
        # Drop NaNs introduced by the Log10 transform of values <= 0.
        c_valid = c_all[~np.isnan(c_all)]
        if len(c_valid) > 0:
            # One global norm (min/max over BOTH datasets) so colors are
            # directly comparable between the two panels. This holds for
            # both "time" and band coloring.
            norm = Normalize(vmin=c_valid.min(), vmax=c_valid.max())
        else:
            norm = Normalize(vmin=0, vmax=1)

    # --- 2. Figure Layout ---
    fig = plt.figure(figsize=(20, 14))

    # Outer Grid: 1 Row, 2 Cols (Left Dataset, Right Dataset)
    gs_outer = fig.add_gridspec(1, 2, wspace=0.15)

    datasets = [(df_a, label_a, c_a), (df_b, label_b, c_b)]

    # Store axes so the TS Y-axes can be linked across columns afterwards.
    scatter_axes = []
    ts_axes_matrix = []  # one list of TS axes per dataset column

    for i, (df, label, c_vals) in enumerate(datasets):
        # Inner Grid: 2 Rows (Scatter, TimeSeries), Height Ratio 1.3:1
        gs_inner = gs_outer[i].subgridspec(2, 1, height_ratios=[1.3, 1.0], hspace=0.15)

        # --- Top: Scatter Plot ---
        ax_sc = fig.add_subplot(gs_inner[0])
        scatter_axes.append(ax_sc)

        if not df.empty:
            # Color only when we have exactly one value per row; otherwise
            # (e.g. the color column is missing from this file) fall back
            # to a flat color instead of crashing on a length mismatch.
            if color_col and cmap and len(c_vals) == len(df):
                ax_sc.scatter(
                    df[x_col],
                    df[y_col],
                    c=c_vals,
                    cmap=cmap,
                    norm=norm,
                    s=15, alpha=0.6, edgecolors="none",
                )
            else:
                ax_sc.scatter(
                    df[x_col],
                    df[y_col],
                    c="tab:blue",
                    s=15, alpha=0.6, edgecolors="none",
                )
        else:
            ax_sc.text(0.5, 0.5, "No Data", ha='center')

        ax_sc.set_title(f"{label}\nPhase Space ({x_col} vs {y_col})", fontweight='bold', fontsize=12)
        ax_sc.set_xlabel(f"{x_col} Drift", fontsize=10)
        if i == 0:
            # Only the left column carries the Y label; the right column
            # keeps its ticks for readability but needs no duplicate label.
            ax_sc.set_ylabel(f"{y_col} Drift", fontsize=10)

        if log_scale:
            ax_sc.set_xscale("log")
            ax_sc.set_yscale("log")

        ax_sc.grid(True, which="both", alpha=0.3)
        ax_sc.set_xlim(x_min, x_max)
        ax_sc.set_ylim(y_min, y_max)

        # --- Bottom: Stacked Time Series ---
        n_bands = len(ts_bands)
        gs_ts = gs_inner[1].subgridspec(n_bands, 1, hspace=0.0)

        # Relative time with heuristic units, per dataset.
        if not df.empty:
            t0 = df["time"].min()
            t_rel = df["time"] - t0

            dur = t_rel.max()
            if dur > 3600:
                t_plot = t_rel / 3600.0
                t_unit = "Hours"
            elif dur > 60:
                t_plot = t_rel / 60.0
                t_unit = "Minutes"
            else:
                t_plot = t_rel
                t_unit = "Seconds"
        else:
            t_plot = []
            t_unit = "Seconds"

        colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd']

        current_col_ts_axes = []

        for b_idx, band in enumerate(ts_bands):
            ax_ts = fig.add_subplot(gs_ts[b_idx])
            current_col_ts_axes.append(ax_ts)

            if not df.empty and band in df.columns:
                col = colors[b_idx % len(colors)]
                ax_ts.plot(t_plot, df[band], color=col, linewidth=1.2)

                # In-plot label identifying the band of this strip.
                ax_ts.text(0.01, 0.8, band, transform=ax_ts.transAxes,
                           fontweight='bold', fontsize=9,
                           bbox=dict(facecolor='white', alpha=0.7, edgecolor='none'))

            if log_scale:
                ax_ts.set_yscale("log")

            ax_ts.grid(True, which="both", alpha=0.3)

            # Formatting
            if i == 0:
                ax_ts.set_ylabel(r"$\|\Omega\|$", fontsize=9)

            if b_idx < n_bands - 1:
                # Only the bottom strip shows time tick labels.
                ax_ts.set_xticklabels([])
            else:
                ax_ts.set_xlabel(f"Time ({t_unit} from start)", fontsize=10)

        ts_axes_matrix.append(current_col_ts_axes)

    # --- 3. Link TS Y-Axes across columns (band i left <-> band i right) ---
    if len(ts_axes_matrix) == 2:
        left_ts = ts_axes_matrix[0]
        right_ts = ts_axes_matrix[1]
        for ax_l, ax_r in zip(left_ts, right_ts):
            if log_scale:
                # sharey handles log limits (and 0/inf edge cases) safely.
                ax_l.sharey(ax_r)
            else:
                # Manual union of limits keeps tick labels visible on both
                # sides while locking the scale.
                yl_l = ax_l.get_ylim()
                yl_r = ax_r.get_ylim()
                new_min = min(yl_l[0], yl_r[0])
                new_max = max(yl_l[1], yl_r[1])
                ax_l.set_ylim(new_min, new_max)
                ax_r.set_ylim(new_min, new_max)

    # --- 4. Shared Colorbar (if applicable) ---
    if color_col and norm is not None and cmap:
        # Dummy ScalarMappable so the colorbar works regardless of which
        # panel actually drew colored points.
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
        sm.set_array([])

        # Place colorbar on the far right, next to the scatter plots.
        fig.subplots_adjust(right=0.9)
        cbar_ax = fig.add_axes([0.92, 0.55, 0.015, 0.3])
        fig.colorbar(sm, cax=cbar_ax, label=c_lbl)

    if title:
        fig.suptitle(title, fontsize=18, y=0.95)

    try:
        plt.savefig(output_path, dpi=150, bbox_inches='tight')
        print(f"Double Super Plot saved to: {output_path}")
    except Exception as e:
        print(f"Error saving plot: {e}")
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
def parse_args():
    """Parse command-line options for the double super plot."""
    parser = argparse.ArgumentParser(description="Double Super Drift Visualization.")

    # Positional inputs
    parser.add_argument("file_a", type=str, help="First CSV file")
    parser.add_argument("file_b", type=str, help="Second CSV file")

    # String-valued options: (flag, default, help)
    for flag, default, text in [
        ("--label-a", "Dataset A", "Label for Left"),
        ("--label-b", "Dataset B", "Label for Right"),
        ("--x-band", "v_low", "Scatter X"),
        ("--y-band", "v_mid", "Scatter Y"),
        ("--ts-bands", "v_low,v_mid,v_high,total", "TS bands list"),
        ("--color-by", "time", "Column to color Scatter by (default: time)"),
        ("--title", None, "Global Title"),
    ]:
        parser.add_argument(flag, type=str, default=default, help=text)

    # Zoom options -- independent GPS windows for A and B
    for flag, text in [
        ("--start-a", "Start GPS for File A"),
        ("--end-a", "End GPS for File A"),
        ("--start-b", "Start GPS for File B"),
        ("--end-b", "End GPS for File B"),
    ]:
        parser.add_argument(flag, type=float, default=None, help=text)

    parser.add_argument(
        "-o", "--output", type=str, default="drift_super_multi.png",
        help="Output filename",
    )
    parser.add_argument("--linear", action="store_true", help="Use linear scales")

    return parser.parse_args()
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
def main():
    """CLI entry point: load both datasets and render the comparison dashboard."""
    args = parse_args()

    frame_a = load_data(args.file_a, start=args.start_a, end=args.end_a)
    frame_b = load_data(args.file_b, start=args.start_b, end=args.end_b)

    # "--ts-bands" arrives as a comma-separated string.
    bands = [token.strip() for token in args.ts_bands.split(",")]

    make_super_multi_plot(
        frame_a,
        frame_b,
        label_a=args.label_a,
        label_b=args.label_b,
        x_col=args.x_band,
        y_col=args.y_band,
        ts_bands=bands,
        output_path=args.output,
        color_col=args.color_by,
        log_scale=not args.linear,
        title=args.title,
    )


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,210 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Drift Time-Series Visualization Tool.
|
|
3
|
+
|
|
4
|
+
Generates a multi-panel "Strip Chart" of drift velocities over time.
|
|
5
|
+
Useful for correlating glitches across different frequency bands.
|
|
6
|
+
|
|
7
|
+
Features:
|
|
8
|
+
- Default: Stacked subplots (one per band) with shared X-axis.
|
|
9
|
+
- Optional: Overlay mode (all bands on one plot) via --overlay.
|
|
10
|
+
- Zoom: --start and --end filters.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import argparse
|
|
14
|
+
import sys
|
|
15
|
+
from typing import Optional, List
|
|
16
|
+
|
|
17
|
+
import matplotlib.pyplot as plt
|
|
18
|
+
import pandas as pd
|
|
19
|
+
import numpy as np
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def load_data(
    filepath: str, start: Optional[float] = None, end: Optional[float] = None
) -> pd.DataFrame:
    """Loads drift data from CSV and applies time filtering.

    Exits the process on a missing/unreadable file, a missing 'time'
    column, or an empty result after windowing.
    """
    try:
        frame = pd.read_csv(filepath)
    except FileNotFoundError:
        print(f"Error: File not found at {filepath}")
        sys.exit(1)
    except Exception as e:
        print(f"Error reading CSV: {e}")
        sys.exit(1)

    # Normalize header names (strip stray whitespace).
    frame.columns = frame.columns.str.strip()

    if "time" not in frame.columns:
        print("Error: CSV must contain a 'time' column.")
        sys.exit(1)

    # --- Apply Time Filtering (Zoom) ---
    if start is not None:
        frame = frame[frame["time"] >= start]
    if end is not None:
        frame = frame[frame["time"] <= end]

    if frame.empty:
        # Nothing to plot for this window; bail out.
        print(f"Warning: No data found in {filepath} for the specified time range.")
        sys.exit(1)

    return frame
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def make_timeseries_plot(
    df: pd.DataFrame,
    bands: List[str],
    output_path: str,
    overlay: bool = False,
    log_scale: bool = True,
    title: Optional[str] = None,
):
    """
    Generates the time-series plot.

    Args:
        df: Drift data with a 'time' column plus one column per band.
        bands: Band column names to draw; unknown names are dropped.
        output_path: Filename the figure is saved to.
        overlay (bool): If False (Default), creates stacked subplots
            (separate axes). If True, plots all bands on a single axis.
        log_scale: Use a logarithmic Y-axis.
        title: Optional figure suptitle.
    """
    # Keep only the requested bands that actually exist in the CSV.
    present = [name for name in bands if name in df.columns]
    if not present:
        print(f"Error: None of the requested bands {bands} found in CSV.")
        print(f"Available columns: {list(df.columns)}")
        sys.exit(1)

    # Time axis relative to the (possibly zoomed) window start,
    # with a heuristic choice of display unit.
    t0 = df["time"].min()
    span = df["time"].max() - t0
    if span > 172800:  # > 2 days
        divisor, t_unit = 86400.0, "Days"
    elif span > 3600:  # > 1 hour
        divisor, t_unit = 3600.0, "Hours"
    elif span > 60:
        divisor, t_unit = 60.0, "Minutes"
    else:
        divisor, t_unit = 1.0, "Seconds"
    t_data = (df["time"] - t0) / divisor

    # --- Plot Setup ---
    if overlay:
        # Single-axis mode: every band shares one axes object.
        fig, ax = plt.subplots(figsize=(12, 6))
        axes = [ax] * len(present)
    else:
        # Stacked mode (default); sharex locks the time axis across strips.
        fig, axes = plt.subplots(
            nrows=len(present),
            ncols=1,
            figsize=(12, 3 * len(present)),
            sharex=True,
            constrained_layout=True,
        )
        if len(present) == 1:
            # A single subplot comes back as a bare Axes, not a list.
            axes = [axes]

    palette = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd"]

    for idx, (ax, band) in enumerate(zip(axes, present)):
        ax.plot(
            t_data, df[band],
            label=band,
            color=palette[idx % len(palette)],
            linewidth=1.5, alpha=0.9,
        )
        if log_scale:
            ax.set_yscale("log")
        ax.grid(True, which="both", alpha=0.3)

        # Stacked mode: each strip carries its own legend/label/title.
        if not overlay:
            ax.set_ylabel(r"$\|\Omega\|$")
            ax.legend(loc="upper right", frameon=True)
            ax.set_title(f"Band: {band}", fontsize=10, loc="left", pad=2)

    # --- Final Formatting ---
    xlabel = f"Time ({t_unit} from GPS {t0:.1f})"
    if overlay:
        axes[0].set_ylabel(r"Drift Velocity $\|\Omega\|$")
        axes[0].set_xlabel(xlabel)
        axes[0].legend(loc="upper right")
    else:
        # Only the bottom strip gets the X label in stacked mode.
        axes[-1].set_xlabel(xlabel)

    if title:
        fig.suptitle(title, fontsize=16)

    try:
        plt.savefig(output_path, dpi=150)
        print(f"Time-series plot saved to: {output_path}")
    except Exception as e:
        print(f"Error saving plot: {e}")
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def parse_args():
    """Parse command-line options for the drift time-series tool."""
    parser = argparse.ArgumentParser(
        description="Visualize Geometric Drift Time-Series."
    )

    parser.add_argument("input_file", type=str, help="Path to input CSV file")

    parser.add_argument(
        "--bands", type=str, default="v_low,v_mid,v_high",
        help="Comma-separated list of bands to plot (default: v_low,v_mid,v_high)",
    )
    parser.add_argument(
        "-o", "--output", type=str, default="drift_timeseries.png",
        help="Output filename",
    )

    # Zoom window (GPS seconds)
    parser.add_argument("--start", type=float, default=None, help="Start GPS Time (Zoom)")
    parser.add_argument("--end", type=float, default=None, help="End GPS Time (Zoom)")

    # Display toggles
    parser.add_argument(
        "--overlay", action="store_true",
        help="Plot all bands on one axis (Default is Stacked)",
    )
    parser.add_argument(
        "--linear", action="store_true",
        help="Use linear Y-scale (Default is Log)",
    )
    parser.add_argument("--title", type=str, default=None, help="Custom plot title")

    return parser.parse_args()
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
def main():
    """CLI entry point: load the CSV and render the strip chart."""
    args = parse_args()

    # "--bands" arrives as a comma-separated string.
    selected = [token.strip() for token in args.bands.split(",")]

    frame = load_data(args.input_file, start=args.start, end=args.end)

    make_timeseries_plot(
        frame,
        bands=selected,
        output_path=args.output,
        overlay=args.overlay,
        log_scale=not args.linear,
        title=args.title,
    )


if __name__ == "__main__":
    main()
|
sgndrift/psd/__init__.py
ADDED
|
File without changes
|
sgndrift/psd/drift.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Core utility functions for calculating Geometric Drift (Fisher Information Velocity).
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
from typing import Dict, Optional, Tuple
|
|
8
|
+
|
|
9
|
+
import numpy as np
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def calculate_fisher_velocity(
    current_psd: np.ndarray,
    previous_psd: np.ndarray,
    dt: float,
    frequencies: np.ndarray,
    delta_f: float,
    bands: Optional[Dict[str, Tuple[float, float]]] = None,
) -> Dict[str, float]:
    """
    Computes the Fisher Information Velocity (Geometric Drift) between two PSD snapshots.

    The metric is the norm of the PSD's time-derivative weighted by the
    Fisher Information metric (1/S^2):

        v^2 = Integral [ (dS/dt / S)^2 ] df

    Args:
        current_psd: PSD power values at time t.
        previous_psd: PSD power values at time t - dt.
        dt: Time difference in seconds (must be > 0).
        frequencies: Frequency bins corresponding to the PSDs.
        delta_f: Frequency bin width (Hz).
        bands: Mapping {'name': (f_min, f_max)} of bands to integrate over.
            When None/empty, a single 'total' band covering the whole
            spectrum is used.

    Returns:
        Mapping of band name -> drift velocity (0.0 for bands containing
        no frequency bins).

    Raises:
        ValueError: If dt is not positive.
    """
    if dt <= 0:
        raise ValueError("Time difference dt must be positive.")

    # Finite-difference estimate of dS/dt.
    s_dot = (current_psd - previous_psd) / dt

    # Integrand (dS/dt / S)^2; bins with non-positive power contribute zero
    # (avoids division by zero / log-metric blowup on bad bins).
    positive = current_psd > 0
    integrand = np.zeros_like(current_psd)
    integrand[positive] = (s_dot[positive] / current_psd[positive]) ** 2

    # Decide which bands to integrate. Copy the caller's dict so it is
    # never mutated.
    if bands:
        band_map = dict(bands)
    else:
        # Default 'total' band: nudge the upper edge past the last bin so
        # the half-open mask below still includes it.
        band_map = {"total": (frequencies[0], frequencies[-1] + delta_f * 0.5)}

    results: Dict[str, float] = {}
    for name, (fmin, fmax) in band_map.items():
        in_band = (frequencies >= fmin) & (frequencies < fmax)
        if np.any(in_band):
            # Riemann sum: Integral [ integrand ] df over the band.
            results[name] = np.sqrt(np.sum(integrand[in_band]) * delta_f)
        else:
            results[name] = 0.0

    return results
|