pydartdiags 0.0.43__tar.gz → 0.5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (28)
  1. pydartdiags-0.5.1/MANIFEST.in +2 -0
  2. {pydartdiags-0.0.43/src/pydartdiags.egg-info → pydartdiags-0.5.1}/PKG-INFO +10 -5
  3. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/README.md +3 -3
  4. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/pyproject.toml +3 -2
  5. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/setup.py +7 -2
  6. pydartdiags-0.5.1/src/pydartdiags/matplots/matplots.py +423 -0
  7. pydartdiags-0.5.1/src/pydartdiags/obs_sequence/composite_types.yaml +35 -0
  8. pydartdiags-0.5.1/src/pydartdiags/obs_sequence/obs_sequence.py +1238 -0
  9. pydartdiags-0.5.1/src/pydartdiags/plots/__init__.py +0 -0
  10. pydartdiags-0.5.1/src/pydartdiags/plots/plots.py +191 -0
  11. pydartdiags-0.5.1/src/pydartdiags/stats/__init__.py +0 -0
  12. pydartdiags-0.5.1/src/pydartdiags/stats/stats.py +432 -0
  13. {pydartdiags-0.0.43 → pydartdiags-0.5.1/src/pydartdiags.egg-info}/PKG-INFO +10 -5
  14. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/src/pydartdiags.egg-info/SOURCES.txt +7 -1
  15. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/src/pydartdiags.egg-info/requires.txt +1 -0
  16. pydartdiags-0.5.1/tests/test_obs_sequence.py +879 -0
  17. pydartdiags-0.5.1/tests/test_stats.py +783 -0
  18. pydartdiags-0.0.43/src/pydartdiags/obs_sequence/obs_sequence.py +0 -825
  19. pydartdiags-0.0.43/src/pydartdiags/plots/plots.py +0 -339
  20. pydartdiags-0.0.43/tests/test_obs_sequence.py +0 -225
  21. pydartdiags-0.0.43/tests/test_plots.py +0 -52
  22. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/LICENSE +0 -0
  23. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/setup.cfg +0 -0
  24. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/src/pydartdiags/__init__.py +0 -0
  25. {pydartdiags-0.0.43/src/pydartdiags/obs_sequence → pydartdiags-0.5.1/src/pydartdiags/matplots}/__init__.py +0 -0
  26. {pydartdiags-0.0.43/src/pydartdiags/plots → pydartdiags-0.5.1/src/pydartdiags/obs_sequence}/__init__.py +0 -0
  27. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/src/pydartdiags.egg-info/dependency_links.txt +0 -0
  28. {pydartdiags-0.0.43 → pydartdiags-0.5.1}/src/pydartdiags.egg-info/top_level.txt +0 -0
@@ -0,0 +1,2 @@
+ include pydartdiags/obs_sequence/composite_types.yaml
+
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: pydartdiags
- Version: 0.0.43
+ Version: 0.5.1
  Summary: Observation Sequence Diagnostics for DART
  Home-page: https://github.com/NCAR/pyDARTdiags.git
  Author: Helen Kershaw
@@ -18,22 +18,27 @@ Requires-Dist: pandas>=2.2.0
  Requires-Dist: numpy>=1.26
  Requires-Dist: plotly>=5.22.0
  Requires-Dist: pyyaml>=6.0.2
+ Requires-Dist: matplotlib>=3.9.4
+ Dynamic: author
+ Dynamic: home-page
+ Dynamic: license-file
+ Dynamic: requires-python

  [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
  [![codecov](https://codecov.io/gh/NCAR/pyDARTdiags/graph/badge.svg?token=VK55SQZSVD)](https://codecov.io/gh/NCAR/pyDARTdiags)
  [![PyPI version](https://badge.fury.io/py/pydartdiags.svg)](https://pypi.org/project/pydartdiags/)
-
+ [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)

  # pyDARTdiags

- pyDARTdiags is a Python library for obsevation space diagnostics for the Data Assimilation Research Testbed ([DART](https://github.com/NCAR/DART)).
+ pyDARTdiags is a Python library for observation space diagnostics for the Data Assimilation Research Testbed ([DART](https://github.com/NCAR/DART)).

  pyDARTdiags is under initial development, so please use caution.
  The MATLAB [observation space diagnostics](https://docs.dart.ucar.edu/en/latest/guide/matlab-observation-space.html) are available through [DART](https://github.com/NCAR/DART).


  pyDARTdiags can be installed through pip: https://pypi.org/project/pydartdiags/
- Documenation : https://ncar.github.io/pyDARTdiags/
+ Documentation : https://ncar.github.io/pyDARTdiags/

  ## Contributing
  Contributions are welcome! If you have a feature request, bug report, or a suggestion, please open an issue on our GitHub repository.
@@ -1,18 +1,18 @@
  [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
  [![codecov](https://codecov.io/gh/NCAR/pyDARTdiags/graph/badge.svg?token=VK55SQZSVD)](https://codecov.io/gh/NCAR/pyDARTdiags)
  [![PyPI version](https://badge.fury.io/py/pydartdiags.svg)](https://pypi.org/project/pydartdiags/)
-
+ [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)

  # pyDARTdiags

- pyDARTdiags is a Python library for obsevation space diagnostics for the Data Assimilation Research Testbed ([DART](https://github.com/NCAR/DART)).
+ pyDARTdiags is a Python library for observation space diagnostics for the Data Assimilation Research Testbed ([DART](https://github.com/NCAR/DART)).

  pyDARTdiags is under initial development, so please use caution.
  The MATLAB [observation space diagnostics](https://docs.dart.ucar.edu/en/latest/guide/matlab-observation-space.html) are available through [DART](https://github.com/NCAR/DART).


  pyDARTdiags can be installed through pip: https://pypi.org/project/pydartdiags/
- Documenation : https://ncar.github.io/pyDARTdiags/
+ Documentation : https://ncar.github.io/pyDARTdiags/

  ## Contributing
  Contributions are welcome! If you have a feature request, bug report, or a suggestion, please open an issue on our GitHub repository.
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "pydartdiags"
- version = "0.0.43"
+ version = "0.5.1"
  authors = [
      { name="Helen Kershaw", email="hkershaw@ucar.edu" },
  ]
@@ -20,7 +20,8 @@ dependencies = [
      "pandas>=2.2.0",
      "numpy>=1.26",
      "plotly>=5.22.0",
-     "pyyaml>=6.0.2"
+     "pyyaml>=6.0.2",
+     "matplotlib>=3.9.4"
  ]

  [project.urls]
@@ -2,9 +2,13 @@ from setuptools import setup, find_packages

  setup(
      name="pydartdiags",
-     version="0.0.41",
+     version="0.5.1",
      packages=find_packages(where="src"),
      package_dir={"": "src"},
+     include_package_data=True,
+     package_data={
+         "pydartdiags": ["obs_sequence/composite_types.yaml"],
+     },
      author="Helen Kershaw",
      author_email="hkershaw@ucar.edu",
      description="Observation Sequence Diagnostics for DART",
@@ -21,6 +25,7 @@ setup(
          "pandas>=2.2.0",
          "numpy>=1.26",
          "plotly>=5.22.0",
-         "pyyaml>=6.0.2"
+         "pyyaml>=6.0.2",
+         "matplotlib>=3.9.4"
      ],
  )
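This release starts shipping composite_types.yaml as package data (via the MANIFEST.in include, include_package_data=True, and the package_data entry above). Below is a minimal sketch, not part of the pyDARTdiags API, of how to confirm the bundled file is reachable from an installed copy; the importlib.resources usage and variable names are my own, only the package path comes from the diff.

# Sketch only: verify the bundled YAML declared in package_data above is installed
# alongside the package. Not how pyDARTdiags itself loads the file.
from importlib.resources import files

import yaml

composite_path = files("pydartdiags.obs_sequence") / "composite_types.yaml"
with composite_path.open("r") as f:
    composites = yaml.safe_load(f)

print(sorted(composites))  # expect keys such as 'acars_horizontal_wind', ...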
@@ -0,0 +1,423 @@
+ # SPDX-License-Identifier: Apache-2.0
+ from pydartdiags.stats import stats
+ import matplotlib.pyplot as plt
+
+ # HK @todo color scheme class
+ dacolors = ["green", "magenta", "orange", "red"]
+
+
+ def plot_profile(obs_seq, levels, type, bias=True, rmse=True, totalspread=True):
+     """
+     plot_profile on the levels for prior and posterior if present
+       - bias
+       - rmse
+       - totalspread
+
+     Args:
+         obs_seq, levels, type, bias=True, rmse=True, totalspread=True
+
+     Example:
+
+         type = 'RADIOSONDE_U_WIND_COMPONENT'
+         hPalevels = [0.0, 100.0, 150.0, 200.0, 250.0, 300.0, 400.0, 500.0, 700, 850, 925, 1000]
+         levels = [i * 100 for i in hPalevels]
+
+         plot_profile(obs_seq, levels, type, bias=True, rmse=True, totalspread=True)
+
+     """
+
+     # calculate stats and add to dataframe
+     stats.diag_stats(obs_seq.df)
+     qc0 = stats.select_used_qcs(obs_seq.df)  # filter only qc=0, qc=2
+
+     # filter by type
+     qc0 = qc0[qc0["type"] == type]
+     if qc0.empty:
+         print(f"No rows found for type: {type}")
+         return None
+
+     all_df = obs_seq.df[obs_seq.df["type"] == type]  # for possible vs used
+
+     if all_df["vert_unit"].nunique() > 1:
+         print(
+             f"Multiple vertical units found in the data: {all_df['vert_unit'].unique()} for type: {type}"
+         )
+         return None
+
+     vert_unit = all_df.iloc[0]["vert_unit"]
+     if vert_unit == "pressure (Pa)":
+         conversion = 0.01  # from Pa to hPa
+     else:
+         conversion = 1.0  # no conversion needed
+
+     # grand statistics
+     grand = stats.grand_statistics(qc0)
+
+     # add level bins to the dataframe
+     stats.bin_by_layer(all_df, levels, verticalUnit=vert_unit)
+     stats.bin_by_layer(qc0, levels, verticalUnit=vert_unit)
+
+     # aggregate by layer
+     df_pvu = stats.possible_vs_used_by_layer(all_df)  # possible vs used
+     df = stats.layer_statistics(qc0)  # bias, rmse, totalspread for plotting
+
+     # using rmse because mean_sqrt vs mean for bias (get a column with 0 obs)
+     if "prior_rmse" not in df.columns:
+         print(f"All layers empty for type: {type}")
+         return None
+
+     fig, ax1 = plt.subplots(figsize=(8, 8))
+
+     # convert to hPa HK @todo only for Pressure (Pa)
+     df["midpoint"] = df["midpoint"].astype(float)
+     df["midpoint"] = df["midpoint"] * conversion
+
+     df_pvu["midpoint"] = df_pvu["midpoint"].astype(float)
+     df_pvu["midpoint"] = df_pvu["midpoint"] * conversion
+
+     # Add horizontal stripes alternating between gray and white to represent the vertical levels
+     left = df["vlevels"].apply(lambda x: x.left * conversion)  # todo convert to HPa
+     right = df["vlevels"].apply(lambda x: x.right * conversion)
+     for i in range(len(left)):
+         color = "gray" if i % 2 == 0 else "white"
+         ax1.axhspan(left.iloc[i], right.iloc[i], color=color, alpha=0.3)
+
+     # Plot the 'bias' data on the first y-axis
+     if bias:
+         ax1.plot(
+             df["prior_bias"],
+             df["midpoint"],
+             color=dacolors[0],
+             marker=".",
+             linestyle="-",
+             label="prior bias",
+         )
+         bias_prior = grand.loc[0, "prior_bias"]
+         if "posterior_bias" in df.columns:
+             ax1.plot(
+                 df["posterior_bias"],
+                 df["midpoint"],
+                 color=dacolors[0],
+                 marker=".",
+                 linestyle="--",
+                 label="posterior bias",
+             )
+             bias_posterior = grand.loc[0, "posterior_bias"]
+     if rmse:
+         ax1.plot(
+             df["prior_rmse"],
+             df["midpoint"],
+             color=dacolors[1],
+             marker=".",
+             linestyle="-",
+             label="prior RMSE",
+         )
+         rmse_prior = grand.loc[0, "prior_rmse"]
+         if "posterior_rmse" in df.columns:
+             ax1.plot(
+                 df["posterior_rmse"],
+                 df["midpoint"],
+                 color=dacolors[1],
+                 marker=".",
+                 linestyle="--",
+                 label="posterior RMSE",
+             )
+             rmse_posterior = grand.loc[0, "posterior_rmse"]
+     if totalspread:
+         ax1.plot(
+             df["prior_totalspread"],
+             df["midpoint"],
+             color=dacolors[2],
+             marker=".",
+             linestyle="-",
+             label="prior totalspread",
+         )
+         totalspread_prior = grand.loc[0, "prior_totalspread"]
+         if "posterior_totalspread" in df.columns:
+             totalspread_posterior = grand.loc[0, "posterior_totalspread"]
+             ax1.plot(
+                 df["posterior_totalspread"],
+                 df["midpoint"],
+                 color=dacolors[2],
+                 marker=".",
+                 linestyle="--",
+                 label="posterior totalspread",
+             )
+
+     ax1.set_ylabel("hPa")
+     ax1.tick_params(axis="y")
+     ax1.set_yticks(df["midpoint"])
+     # ax1.set_yticklabels(df['midpoint'])
+
+     ax3 = ax1.twiny()
+     ax3.set_xlabel("# obs (o=possible; +=assimilated)", color=dacolors[-1])
+     ax3.tick_params(axis="x", colors=dacolors[-1])
+     ax3.plot(
+         df_pvu["possible"],
+         df_pvu["midpoint"],
+         color=dacolors[-1],
+         marker="o",
+         linestyle="",
+         markerfacecolor="none",
+         label="possible",
+     )
+     ax3.plot(
+         df_pvu["used"],
+         df_pvu["midpoint"],
+         color=dacolors[-1],
+         marker="+",
+         linestyle="",
+         label="possible",
+     )
+     ax3.set_xlim(left=0)
+
+     if vert_unit == "pressure (Pa)":
+         ax1.invert_yaxis()
+     ax1.set_title(type)
+     # Build the datalabel string
+     datalabel = []
+     if bias:
+         datalabel.append("bias")
+     if rmse:
+         datalabel.append("rmse")
+     if totalspread:
+         datalabel.append("totalspread")
+     ax1.set_xlabel(", ".join(datalabel))
+
+     lines1, labels1 = ax1.get_legend_handles_labels()
+     ax1.legend(lines1, labels1, loc="upper left", bbox_to_anchor=(1.05, 1))
+
+     ax1.text(
+         0.6, -0.08, obs_seq.file, ha="center", va="center", transform=ax1.transAxes
+     )
+
+     # Add a text box with information below the legend
+     textstr = "Grand statistics:\n"
+     if bias:
+         textstr += f"prior_bias: {bias_prior:.7f}\n"
+     if rmse:
+         textstr += f"rmse_prior: {rmse_prior:.7f}\n"
+     if totalspread:
+         textstr += f"totalspread_prior: {totalspread_prior:.7f}\n"
+     if "posterior_bias" in df.columns:
+         if bias:
+             textstr += f"posterior_bias: {bias_posterior:.7f}\n"
+         if rmse:
+             textstr += f"rmse_posterior: {rmse_posterior:.7f}\n"
+         if totalspread:
+             textstr += f"totalspread_posterior: {totalspread_posterior:.7f}\n"
+
+     props = dict(boxstyle="round", facecolor="wheat", alpha=0.5)
+     ax1.text(
+         1.05,
+         0.5,
+         textstr,
+         transform=ax1.transAxes,
+         fontsize=10,
+         verticalalignment="top",
+         bbox=props,
+     )
+
+     plt.tight_layout()
+     plt.show()
+
+     return fig
+
+
+ def plot_rank_histogram(obs_seq, levels, type, ens_size):
+
+     qc0 = stats.select_used_qcs(obs_seq.df)  # filter only qc=0, qc=2
+     qc0 = qc0[qc0["type"] == type]  # filter by type
+     stats.bin_by_layer(qc0, levels)  # bin by level
+
+     midpoints = qc0["midpoint"].unique()
+
+     for level in sorted(midpoints):
+
+         df = qc0[qc0["midpoint"] == level]
+
+         df = stats.calculate_rank(qc0)
+
+         if "posterior_rank" in df.columns:
+             fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
+         else:
+             fig, ax1 = plt.subplots()
+
+         # Plot the prior rank histogram
+         bins = list(range(1, ens_size + 2))
+         ax1.hist(
+             df["prior_rank"], bins=bins, color="blue", alpha=0.5, label="prior rank"
+         )
+         ax1.set_title("Prior Rank Histogram")
+         ax1.set_xlabel("Observation Rank (among ensemble members)")
+         ax1.set_ylabel("Count")
+
+         # Plot the posterior rank histogram if it exists
+         if "posterior_rank" in df.columns:
+             ax2.hist(
+                 df["posterior_rank"],
+                 bins=bins,
+                 color="green",
+                 alpha=0.5,
+                 label="posterior rank",
+             )
+             ax2.set_title("Posterior Rank Histogram")
+             ax2.set_xlabel("Observation Rank (among ensemble members)")
+             ax2.set_ylabel("Count")
+
+         fig.suptitle(f"{type} at Level {level}", fontsize=14)
+
+         plt.tight_layout(rect=[0, 0.03, 1, 0.95])
+         plt.show()
+
+     return fig
+
+
+ def plot_evolution(
+     obs_seq,
+     type,
+     time_bin_width,
+     stat,
+     levels=None,
+     tick_interval=2,
+     time_format="%m-%d",
+     plot_pvu=True,
+ ):
+     """
+     Plot the time evolution of the requested statistics and optionally used vs possible observations.
+
+     Args:
+         obs_seq: The observation sequence object.
+         type (str): The type of observation to filter by.
+         time_bin_width (str): The width of each time bin (e.g., '3600s' for 1 hour).
+         stat (str): The statistic to plot. Default is "prior_rmse".
+         levels (list, optional): The levels to bin by. If None, no binning by level.
+         tick_interval (int): Interval for x-axis ticks (default is 2).
+         time_format (str): Format string for time labels on the x-axis (default is '%m-%d').
+         plot_pvu (bool): Whether to plot possible vs used observations (default is True).
+
+     Returns:
+         fig: The matplotlib figure object.
+     """
+     # Calculate stats and add to dataframe
+     stats.diag_stats(obs_seq.df)
+     qc0 = stats.select_used_qcs(obs_seq.df)  # filter only qc=0, qc=2
+     qc0 = qc0[qc0["type"] == type]  # filter by type
+
+     if qc0.empty:
+         print(f"No data found for type: {type}")
+         return
+
+     all_df = obs_seq.df[obs_seq.df["type"] == type]  # for possible vs used
+
+     if levels:
+         stats.bin_by_layer(qc0, levels)  # bin by level
+         midpoints = qc0["midpoint"].unique()
+
+         for level in sorted(midpoints):
+             df = qc0[qc0["midpoint"] == level]
+
+             # Bin by time
+             stats.bin_by_time(df, time_bin_width)
+
+             # Aggregate by time bin
+             df = stats.time_statistics(df)
+
+             # Calculate possible vs used if enabled
+             df_pvu = None
+             if plot_pvu:
+                 stats.bin_by_time(all_df, time_bin_width)
+                 df_pvu = stats.possible_vs_used_by_time(all_df)
+
+             # Plot the time evolution of requested stats
+             plot_time_evolution(
+                 df, df_pvu, stat, type, level, tick_interval, time_format, plot_pvu
+             )
+     else:
+         # Bin by time
+         stats.bin_by_time(qc0, time_bin_width)
+
+         # Aggregate by time bin
+         df = stats.time_statistics(qc0)
+
+         # Calculate possible vs used if enabled
+         df_pvu = None
+         if plot_pvu:
+             stats.bin_by_time(all_df, time_bin_width)
+             df_pvu = stats.possible_vs_used_by_time(all_df)
+
+         # Plot the time evolution of requested stats
+         return plot_time_evolution(
+             df, df_pvu, stat, type, None, tick_interval, time_format, plot_pvu
+         )
+
+
+ def plot_time_evolution(
+     df, df_pvu, stat, type, level, tick_interval, time_format, plot_pvu
+ ):
+     """
+     Plot the time evolution of the requested statistics and optionally used vs possible observations.
+
+     Args:
+         df (pd.DataFrame): The aggregated DataFrame for statistics.
+         df_pvu (pd.DataFrame): The DataFrame for possible vs used observations (if plot_pvu is True).
+         stat (str): The statistic to plot.
+         type (str): The type of observation.
+         level (float or None): The vertical level (if applicable).
+         tick_interval (int): Interval for x-axis ticks (default is 2).
+         time_format (str): Format string for time labels on the x-axis.
+         plot_pvu (bool): Whether to plot possible vs used observations (default is True).
+
+     Returns:
+         fig: The matplotlib figure object.
+     """
+     fig, ax1 = plt.subplots()
+
+     # Plot prior and posterior statistics
+     if f"prior_{stat}" in df.columns:
+         ax1.plot(df["time_bin_midpoint"], df[f"prior_{stat}"], label=f"prior {stat}")
+     if f"posterior_{stat}" in df.columns:
+         ax1.plot(
+             df["time_bin_midpoint"], df[f"posterior_{stat}"], label=f"posterior {stat}"
+         )
+
+     # Set x-axis ticks every 'tick_interval' values
+     tick_positions = df["time_bin_midpoint"][::tick_interval]
+     ax1.set_xticks(tick_positions)
+     ax1.set_xticklabels(
+         tick_positions.dt.strftime(time_format), rotation=45, ha="right"
+     )
+
+     # Add a secondary y-axis for possible vs used observations if enabled
+     if plot_pvu and df_pvu is not None:
+         ax2 = ax1.twinx()
+         ax2.set_ylabel("# obs (o=possible; +=assimilated)", color="red")
+         ax2.tick_params(axis="y", colors="red")
+
+         # Plot possible and used observations
+         ax2.plot(
+             df_pvu["time_bin_midpoint"],
+             df_pvu["possible"],
+             color="red",
+             marker="o",
+             linestyle="",
+             markerfacecolor="none",
+         )
+         ax2.plot(
+             df_pvu["time_bin_midpoint"],
+             df_pvu["used"],
+             color="red",
+             marker="+",
+             linestyle="",
+         )
+         ax2.set_ylim(bottom=0)
+
+     ax1.legend(loc="upper right")
+     title = f"{type}" if level is None else f"{type} at level {level}"
+     ax1.set_title(title)
+     ax1.set_xlabel("Time")
+     ax1.set_ylabel(stat)
+
+     plt.tight_layout()
+
+     return fig
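The new matplots module reads everything from an observation-sequence object's obs_seq.df DataFrame and obs_seq.file attribute, so a typical call mirrors the plot_profile docstring above. A hedged sketch follows; the ObsSequence class name and the obs_seq.final filename are assumptions, check pydartdiags.obs_sequence.obs_sequence in this release for the actual constructor.

# Usage sketch based on the plot_profile docstring above; the loader class name
# (ObsSequence) and the input filename are assumptions, not taken from this diff.
from pydartdiags.obs_sequence import obs_sequence as obsq
from pydartdiags.matplots import matplots

obs_seq = obsq.ObsSequence("obs_seq.final")  # assumed constructor

hPalevels = [0.0, 100.0, 150.0, 200.0, 250.0, 300.0, 400.0, 500.0, 700, 850, 925, 1000]
levels = [i * 100 for i in hPalevels]  # hPa -> Pa, as in the docstring example

fig = matplots.plot_profile(
    obs_seq, levels, "RADIOSONDE_U_WIND_COMPONENT", bias=True, rmse=True, totalspread=True
)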
@@ -0,0 +1,35 @@
+ acars_horizontal_wind:
+   description: ACARS-derived Horizontal wind speed
+   components:
+     - acars_u_wind_component
+     - acars_v_wind_component
+
+ sat_horizontal_wind:
+   description: Satellite-derived horizontal wind speed
+   components:
+     - sat_u_wind_component
+     - sat_v_wind_component
+
+ radiosonde_horizontal_wind:
+   description: Radiosonde-derived horizontal wind speed
+   components:
+     - radiosonde_u_wind_component
+     - radiosonde_v_wind_component
+
+ aircraft_horizontal_wind:
+   description: Aircraft-derived horizontal wind speed
+   components:
+     - aircraft_u_wind_component
+     - aircraft_v_wind_component
+
+ 10_m_horizontal_wind:
+   description: 10 meter horizontal wind speed
+   components:
+     - 10m_u_wind_component
+     - 10m_v_wind_component
+
+ marine_sfc_horizontal_wind:
+   description: Marine surface horizontal wind speed
+   components:
+     - marine_sfc_u_wind_component
+     - marine_sfc_v_wind_component
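Each entry in composite_types.yaml maps a composite observation name to a description and its two wind components. A small sketch of how one entry parses with PyYAML is shown below; how obs_sequence.py actually consumes this file is defined in that module, not here.

# Parse one composite-type entry with PyYAML to show the resulting structure;
# this mirrors the file contents above but is not how pyDARTdiags loads it.
import yaml

doc = """
radiosonde_horizontal_wind:
  description: Radiosonde-derived horizontal wind speed
  components:
    - radiosonde_u_wind_component
    - radiosonde_v_wind_component
"""

entry = yaml.safe_load(doc)["radiosonde_horizontal_wind"]
u_component, v_component = entry["components"]
print(entry["description"], u_component, v_component)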