pydartdiags 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,510 @@
+ # SPDX-License-Identifier: Apache-2.0
+ import pandas as pd
+ import numpy as np
+ from functools import wraps
+ from datetime import datetime, timedelta
+
+
+ def apply_to_phases_in_place(func):
+     """
+     Decorator to apply a function to both 'prior' and 'posterior' phases
+     and modify the DataFrame in place.
+
+     The decorated function should accept 'phase' as its first argument.
+     """
+
+     @wraps(func)
+     def wrapper(df, *args, **kwargs):
+         for phase in ["prior", "posterior"]:
+             if f"{phase}_ensemble_spread" in df.columns:
+                 func(df, phase, *args, **kwargs)
+         return df
+
+     return wrapper
+
+
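A minimal sketch of the pattern (an editor's illustration, not part of the package; `demo` and its derived column are hypothetical):

    @apply_to_phases_in_place
    def demo(df, phase):
        # The decorator supplies `phase`, running once per phase whose
        # '<phase>_ensemble_spread' column exists in df.
        df[f"{phase}_spread_sq"] = df[f"{phase}_ensemble_spread"] ** 2

    demo(df)  # callers pass only the DataFrame; df is modified in place
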
+ def apply_to_phases_by_type_return_df(func):
+     """
+     Decorator to apply a function to both 'prior' and 'posterior' phases and return a new DataFrame.
+
+     The decorated function should accept 'phase' as its first argument and return a DataFrame.
+     """
+
+     @wraps(func)
+     def wrapper(df, *args, **kwargs):
+         results = []
+         for phase in ["prior", "posterior"]:
+             if f"{phase}_ensemble_mean" in df.columns:
+                 result = func(df, phase, *args, **kwargs)
+                 results.append(result)
+
+         if not results:
+             return (
+                 pd.DataFrame()
+             )  # Return an empty DataFrame if no results are generated
+
+         # Dynamically determine merge keys based on common columns
+         common_columns = set(results[0].columns)
+         for result in results[1:]:
+             common_columns &= set(result.columns)
+
+         # Exclude phase-specific columns, for both phases, from the merge keys
+         phase_specific_columns = {
+             f"{p}_{stat}"
+             for p in ("prior", "posterior")
+             for stat in ("sq_err", "bias", "totalvar", "rmse", "totalspread")
+         }
+         merge_keys = list(common_columns - phase_specific_columns)
+
+         if len(results) == 2:
+             return pd.merge(results[0], results[1], on=merge_keys)
+         else:
+             return results[0]
+
+     return wrapper
+
+
+ def apply_to_phases_by_obs(func):
+     """
+     Decorator to apply a function to both 'prior' and 'posterior' phases and return a single DataFrame.
+
+     The decorated function should accept 'phase' as its first argument and return a DataFrame.
+     The function is always applied to the 'prior' phase; when posterior ensemble columns are
+     present, the resulting posterior rank column is copied into the prior result.
+     """
+
+     @wraps(func)
+     def wrapper(df, *args, **kwargs):
+
+         res_df = func(df, "prior", *args, **kwargs)
+         if "posterior_ensemble_mean" in df.columns:
+             posterior_df = func(df, "posterior", *args, **kwargs)
+             res_df["posterior_rank"] = posterior_df["posterior_rank"]
+
+         return res_df
+
+     return wrapper
+
+
+ @apply_to_phases_by_obs
+ def calculate_rank(df, phase):
+     """
+     Calculate the rank of observations within an ensemble.
+
+     Note:
+         This function is decorated with @apply_to_phases_by_obs, which modifies its usage.
+         You should call it as calculate_rank(df), and the decorator will automatically apply the
+         function to all relevant phases ('prior' and 'posterior').
+
+     This function takes a DataFrame containing ensemble predictions and observed values,
+     adds sampling noise to the ensemble predictions, and calculates the rank of the observed
+     value within the perturbed ensemble for each observation. The rank indicates the position
+     of the observed value within the sorted ensemble values, with 1 being the lowest. If the
+     observed value is larger than the largest ensemble member, its rank is set to the ensemble
+     size plus one.
+
+     Parameters:
+         df (pd.DataFrame): A DataFrame with '{phase}_ensemble_member' columns and
+             'observation', 'obs_err_var', and 'type' columns.
+
+     Returns:
+         pd.DataFrame: A DataFrame containing columns for the observation 'type' and '{phase}_rank'.
+     """
+     column = f"{phase}_ensemble_member"
+     ensemble_values = df.filter(regex=column).to_numpy().copy()
+     std_dev = np.sqrt(df["obs_err_var"]).to_numpy()
+     obsvalue = df["observation"].to_numpy()
+     obstype = df["type"].to_numpy()
+     ens_size = ensemble_values.shape[1]
+     mean = 0.0  # mean of the sampling noise
+     rank = np.zeros(obsvalue.shape[0], dtype=int)
+
+     for obs in range(ensemble_values.shape[0]):
+         sampling_noise = np.random.normal(mean, std_dev[obs], ens_size)
+         ensemble_values[obs] += sampling_noise
+         ensemble_values[obs].sort()
+         for i, ens in enumerate(ensemble_values[obs]):
+             if obsvalue[obs] <= ens:
+                 rank[obs] = i + 1
+                 break
+
+         if rank[obs] == 0:  # observation is larger than largest ensemble member
+             rank[obs] = ens_size + 1
+
+     result_df = pd.DataFrame({"type": obstype, f"{phase}_rank": rank})
+
+     return result_df
+
+
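A usage sketch for calculate_rank (editor's example; values are made up, and the function draws unseeded random sampling noise internally, so ranks vary between runs):

    import numpy as np
    import pandas as pd
    from pydartdiags.stats.stats import calculate_rank

    rng = np.random.default_rng(7)
    df = pd.DataFrame(
        {f"prior_ensemble_member_{i}": rng.normal(290.0, 1.0, 5) for i in range(1, 4)}
    )
    df["observation"] = rng.normal(290.0, 1.0, 5)
    df["obs_err_var"] = 0.25
    df["type"] = "RADIOSONDE_TEMPERATURE"

    ranks = calculate_rank(df)  # columns 'type' and 'prior_rank'; ranks fall in 1..4
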
+ def mean_then_sqrt(x):
+     """
+     Calculate the mean of an array-like object and then take the square root of the result.
+
+     Parameters:
+         x (array-like): An array-like object (such as a list or a pandas Series).
+             The elements should be numeric.
+
+     Returns:
+         float: The square root of the mean of the input array.
+
+     Raises:
+         TypeError: If the input is not an array-like object containing numeric values.
+         ValueError: If the input array is empty.
+     """
+
+     return np.sqrt(np.mean(x))
+
+
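A quick illustrative check (editor's example): applied to squared errors this yields an RMSE, which is how the aggregation functions below use it.

    mean_then_sqrt(pd.Series([1.0, 4.0, 4.0]))  # mean is 3.0, so this returns sqrt(3.0) ~= 1.732
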
+ @apply_to_phases_in_place
+ def diag_stats(df, phase):
+     """
+     Calculate diagnostic statistics for a given phase and add them to the DataFrame.
+
+     Note:
+         This function is decorated with @apply_to_phases_in_place, which modifies its usage.
+         You should call it as diag_stats(df), and the decorator will automatically apply the
+         function to all relevant phases ('prior' and 'posterior'), modifying the DataFrame
+         in place.
+
+     Args:
+         df (pandas.DataFrame): The input DataFrame containing observation data and ensemble statistics.
+             The DataFrame must include the following columns:
+
+             - 'observation': The actual observation values.
+             - 'obs_err_var': The variance of the observation error.
+             - 'prior_ensemble_mean' and/or 'posterior_ensemble_mean': The mean of the ensemble.
+             - 'prior_ensemble_spread' and/or 'posterior_ensemble_spread': The spread of the ensemble.
+
+     Returns:
+         None: The function modifies the DataFrame in place by adding the following columns:
+
+             - 'prior_sq_err' and/or 'posterior_sq_err': The squared error for the 'prior' and 'posterior' phases.
+             - 'prior_bias' and/or 'posterior_bias': The bias for the 'prior' and 'posterior' phases.
+             - 'prior_totalvar' and/or 'posterior_totalvar': The total variance for the 'prior' and 'posterior' phases.
+
+     Notes:
+         - Spread is the standard deviation of the ensemble.
+         - The function modifies the input DataFrame by adding new columns for the calculated statistics.
+     """
+     pd.options.mode.copy_on_write = True
+
+     # input from the observation sequence
+     spread_column = f"{phase}_ensemble_spread"
+     mean_column = f"{phase}_ensemble_mean"
+
+     # Calculated from the observation sequence
+     sq_err_column = f"{phase}_sq_err"
+     bias_column = f"{phase}_bias"
+     totalvar_column = f"{phase}_totalvar"
+
+     df[sq_err_column] = (df[mean_column] - df["observation"]) ** 2
+     df[bias_column] = df[mean_column] - df["observation"]
+     df[totalvar_column] = df["obs_err_var"] + df[spread_column] ** 2
+
+
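An in-place usage sketch (editor's example with prior-only columns and made-up values):

    df = pd.DataFrame(
        {
            "observation": [271.1, 272.3],
            "obs_err_var": [1.0, 1.0],
            "prior_ensemble_mean": [270.8, 272.9],
            "prior_ensemble_spread": [0.7, 0.6],
        }
    )
    diag_stats(df)  # adds prior_sq_err, prior_bias, and prior_totalvar columns in place
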
+ def bin_by_layer(df, levels, verticalUnit="pressure (Pa)"):
+     """
+     Bin observations by vertical layers and add 'vlevels' and 'midpoint' columns to the DataFrame.
+
+     This function bins the observations in the DataFrame based on the specified vertical levels and adds two new columns:
+     'vlevels', which represents the categorized vertical levels, and 'midpoint', which represents the midpoint of each
+     vertical level bin. Only observations (rows) with the specified vertical unit are binned.
+
+     Args:
+         df (pandas.DataFrame): The input DataFrame containing observation data.
+             The DataFrame must include the following columns:
+
+             - 'vertical': The vertical coordinate values of the observations.
+             - 'vert_unit': The unit of the vertical coordinate values.
+
+         levels (list): A list of bin edges for the vertical levels.
+         verticalUnit (str, optional): The unit of the vertical axis (e.g., 'pressure (Pa)'). Default is 'pressure (Pa)'.
+
+     Returns:
+         None: The function modifies the DataFrame in place by adding the following columns:
+
+             - 'vlevels': The categorized vertical levels.
+             - 'midpoint': The midpoint of each vertical level bin.
+
+     Notes:
+         - The function modifies the input DataFrame by adding 'vlevels' and 'midpoint' columns.
+         - The 'midpoint' value is the midpoint of the observation's vertical level bin.
+     """
+     pd.options.mode.copy_on_write = True
+     df.loc[df["vert_unit"] == verticalUnit, "vlevels"] = pd.cut(
+         df.loc[df["vert_unit"] == verticalUnit, "vertical"], levels, include_lowest=True
+     )
+     df.loc[:, "midpoint"] = df["vlevels"].apply(lambda x: x.mid)
+
+
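A small sketch (editor's example; pandas.cut requires increasing bin edges, so the pressure edges are listed from low to high):

    df = pd.DataFrame(
        {
            "vertical": [98000.0, 85000.0, 50000.0],
            "vert_unit": ["pressure (Pa)"] * 3,
        }
    )
    bin_by_layer(df, levels=[40000.0, 70000.0, 90000.0, 100000.0])
    # df["midpoint"] is now 95000.0, 80000.0, 55000.0
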
+ def bin_by_time(df, time_value):
+     """
+     Bin observations by time and add 'time_bin' and 'time_bin_midpoint' columns to the DataFrame.
+     The first bin starts 1 second before the minimum time value, so the minimum time is included in the
+     first bin. The last bin is inclusive of the maximum time value.
+
+     Args:
+         df (pd.DataFrame): The input DataFrame containing a 'time' column.
+         time_value (str): The width of each time bin as a pandas frequency string
+             (e.g., '3600s' for one-hour bins).
+
+     Returns:
+         None: The function modifies the DataFrame in place by adding 'time_bin' and 'time_bin_midpoint' columns.
+     """
+     # Create time bins
+     start = df["time"].min() - timedelta(seconds=1)
+     end = df["time"].max()
+     # Extend the end time to the next bin boundary so the maximum time is covered
+     time_delta = pd.Timedelta(time_value)
+     aligned_end = (pd.Timestamp(end) + time_delta).floor(time_value)
+
+     time_bins = pd.date_range(
+         start=start,
+         end=aligned_end,
+         freq=time_value,
+     )
+
+     df["time_bin"] = pd.cut(df["time"], bins=time_bins)
+
+     # Calculate the midpoint of each time bin
+     df["time_bin_midpoint"] = df["time_bin"].apply(
+         lambda x: x.left + (x.right - x.left) / 2 if pd.notnull(x) else None
+     )
+
+
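An illustrative call with hourly bins (editor's example):

    df = pd.DataFrame(
        {"time": pd.to_datetime(["2024-01-01 00:10", "2024-01-01 01:20", "2024-01-01 02:05"])}
    )
    bin_by_time(df, "3600s")  # adds 'time_bin' intervals and 'time_bin_midpoint' in place
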
+ @apply_to_phases_by_type_return_df
+ def grand_statistics(df, phase):
+     """
+     Calculate grand statistics (RMSE, bias, total spread) for each observation type and phase.
+
+     This function assumes that diagnostic statistics (such as squared error, bias, and total variance)
+     have already been computed by :func:`diag_stats` and are present in the DataFrame. It groups the data by observation
+     type and computes the root mean square error (RMSE), mean bias, and total spread for the specified phase.
+
+     Note:
+         This function is decorated with @apply_to_phases_by_type_return_df, which modifies its usage.
+         You should call it as grand_statistics(df), and the decorator will automatically apply the function
+         to all relevant phases ('prior' and 'posterior') and return a merged DataFrame.
+
+     Args:
+         df (pandas.DataFrame): The input DataFrame containing diagnostic statistics for observations.
+
+     Returns:
+         pandas.DataFrame: A DataFrame with columns:
+
+             - 'type': The observation type.
+             - '{phase}_rmse': The root mean square error for the phase.
+             - '{phase}_bias': The mean bias for the phase.
+             - '{phase}_totalspread': The total spread for the phase.
+     """
+
+     # assuming diag_stats has been called
+     grand = (
+         df.groupby(["type"], observed=False)
+         .agg(
+             {
+                 f"{phase}_sq_err": mean_then_sqrt,
+                 f"{phase}_bias": "mean",
+                 f"{phase}_totalvar": mean_then_sqrt,
+             }
+         )
+         .reset_index()
+     )
+
+     grand.rename(columns={f"{phase}_sq_err": f"{phase}_rmse"}, inplace=True)
+     grand.rename(columns={f"{phase}_totalvar": f"{phase}_totalspread"}, inplace=True)
+
+     return grand
+
+
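An end-to-end sketch (editor's example; observation types and values are made up) combining diag_stats with grand_statistics:

    df = pd.DataFrame(
        {
            "type": ["RADIOSONDE_TEMPERATURE", "RADIOSONDE_TEMPERATURE", "AIRCRAFT_TEMPERATURE"],
            "observation": [271.1, 272.3, 255.4],
            "obs_err_var": [1.0, 1.0, 4.0],
            "prior_ensemble_mean": [270.8, 272.9, 254.1],
            "prior_ensemble_spread": [0.7, 0.6, 1.9],
        }
    )
    diag_stats(df)
    summary = grand_statistics(df)  # one row per 'type': prior_rmse, prior_bias, prior_totalspread
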
+ @apply_to_phases_by_type_return_df
+ def layer_statistics(df, phase):
+     """
+     Calculate statistics (RMSE, bias, total spread) for each observation type and vertical layer.
+
+     This function assumes that diagnostic statistics (such as squared error, bias, and total variance)
+     have already been computed with :func:`diag_stats` and are present in the DataFrame. It groups the data by
+     vertical layer midpoint and observation type, and computes the root mean square error (RMSE),
+     mean bias, and total spread for the specified phase for each vertical layer.
+
+     Note:
+         This function is decorated with @apply_to_phases_by_type_return_df, which modifies its usage.
+         You should call it as layer_statistics(df), and the decorator will automatically apply the function
+         to all relevant phases ('prior' and 'posterior') and return a merged DataFrame.
+
+     Args:
+         df (pandas.DataFrame): The input DataFrame containing diagnostic statistics for observations.
+         phase (str): The phase for which to calculate the statistics ('prior' or 'posterior').
+
+     Returns:
+         pandas.DataFrame: A DataFrame with columns:
+
+             - 'midpoint': The midpoint of the vertical layer.
+             - 'type': The observation type.
+             - '{phase}_rmse': The root mean square error for the phase.
+             - '{phase}_bias': The mean bias for the phase.
+             - '{phase}_totalspread': The total spread for the phase.
+             - 'vert_unit': The vertical unit.
+             - 'vlevels': The categorized vertical level.
+     """
+
+     # assuming diag_stats has been called
+     layer_stats = (
+         df.groupby(["midpoint", "type"], observed=False)
+         .agg(
+             {
+                 f"{phase}_sq_err": mean_then_sqrt,
+                 f"{phase}_bias": "mean",
+                 f"{phase}_totalvar": mean_then_sqrt,
+                 "vert_unit": "first",
+                 "vlevels": "first",
+             }
+         )
+         .reset_index()
+     )
+
+     layer_stats.rename(columns={f"{phase}_sq_err": f"{phase}_rmse"}, inplace=True)
+     layer_stats.rename(
+         columns={f"{phase}_totalvar": f"{phase}_totalspread"}, inplace=True
+     )
+
+     return layer_stats
+
+
+ @apply_to_phases_by_type_return_df
+ def time_statistics(df, phase):
+     """
+     Calculate time-based statistics (RMSE, bias, total spread) for each observation type and time bin.
+
+     This function assumes that diagnostic statistics (such as squared error, bias, and total variance)
+     have already been computed by :func:`diag_stats` and are present in the DataFrame. It groups the data
+     by time bin midpoint and observation type, and computes the root mean square error (RMSE), mean bias,
+     and total spread for the specified phase for each time bin.
+
+     Note:
+         This function is decorated with @apply_to_phases_by_type_return_df.
+         You should call it as time_statistics(df), and the decorator will automatically apply the function
+         to all relevant phases ('prior' and 'posterior') and return a merged DataFrame.
+
+     Args:
+         df (pandas.DataFrame): The input DataFrame containing diagnostic statistics for observations.
+         phase (str): The phase for which to calculate the statistics ('prior' or 'posterior').
+
+     Returns:
+         pandas.DataFrame: A DataFrame with columns:
+
+             - 'time_bin_midpoint': The midpoint of the time bin.
+             - 'type': The observation type.
+             - '{phase}_rmse': The root mean square error for the phase.
+             - '{phase}_bias': The mean bias for the phase.
+             - '{phase}_totalspread': The total spread for the phase.
+             - 'time_bin': The time bin interval.
+             - 'time': The first time value in the bin.
+     """
+     # Assuming diag_stats has been called
+     time_stats = (
+         df.groupby(["time_bin_midpoint", "type"], observed=False)
+         .agg(
+             {
+                 f"{phase}_sq_err": mean_then_sqrt,
+                 f"{phase}_bias": "mean",
+                 f"{phase}_totalvar": mean_then_sqrt,
+                 "time_bin": "first",
+                 "time": "first",
+             }
+         )
+         .reset_index()
+     )
+
+     time_stats.rename(columns={f"{phase}_sq_err": f"{phase}_rmse"}, inplace=True)
+     time_stats.rename(
+         columns={f"{phase}_totalvar": f"{phase}_totalspread"}, inplace=True
+     )
+
+     return time_stats
+
+
+ def possible_vs_used(df):
+     """
+     Calculate the count of possible vs. used observations by type.
+
+     This function takes a DataFrame containing observation data, including a 'type' column for the observation
+     type and an 'observation' column. The number of possible observations ('possible') is the total count of
+     observations of each type; the number of used observations ('used') is the count of assimilated
+     observations (as determined by :func:`select_used_qcs`).
+     The result is a DataFrame with each observation type, the count of possible observations, and the count of
+     used observations.
+
+     Returns:
+         pd.DataFrame: A DataFrame with three columns: 'type', 'possible', and 'used'. 'type' is the observation type,
+         'possible' is the count of all observations of that type, and 'used' is the count of observations of that type
+         that passed quality control checks.
+     """
+     possible = df.groupby("type")["observation"].count()
+     possible.rename("possible", inplace=True)
+
+     used_qcs = select_used_qcs(df).groupby("type")["observation"].count()
+     used = used_qcs.reindex(possible.index, fill_value=0)
+     used.rename("used", inplace=True)
+
+     return pd.concat([possible, used], axis=1).reset_index()
+
+
+ def possible_vs_used_by_layer(df):
+     """
+     Calculate the count of possible vs. used observations by type and vertical level.
+
+     Returns:
+         pd.DataFrame: A DataFrame with columns 'type', 'midpoint', 'possible', and 'used', where
+         'midpoint' identifies the vertical layer (as assigned by :func:`bin_by_layer`).
+     """
+     possible = df.groupby(["type", "midpoint"], observed=False)["type"].count()
+     possible.rename("possible", inplace=True)
+
+     used_qcs = (
+         select_used_qcs(df)
+         .groupby(["type", "midpoint"], observed=False)["type"]
+         .count()
+     )
+
+     used = used_qcs.reindex(possible.index, fill_value=0)
+     used.rename("used", inplace=True)
+
+     return pd.concat([possible, used], axis=1).reset_index()
+
+
+ def select_used_qcs(df):
+     """
+     Select rows from the DataFrame where the observation was used.
+     Includes observations for which the posterior forward observation operators failed.
+
+     Returns:
+         pandas.DataFrame: A DataFrame containing only the rows with a DART quality control flag of 0 or 2.
+     """
+     return df[(df["DART_quality_control"] == 0) | (df["DART_quality_control"] == 2)]
+
+
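A toy illustration (editor's example; QC values invented) of the QC filter feeding possible_vs_used:

    df = pd.DataFrame(
        {
            "type": ["RADIOSONDE_TEMPERATURE"] * 3 + ["AIRCRAFT_TEMPERATURE"],
            "observation": [271.1, 272.3, 268.9, 255.4],
            "DART_quality_control": [0, 2, 7, 0],
        }
    )
    select_used_qcs(df)   # keeps the three rows with QC flag 0 or 2
    possible_vs_used(df)  # RADIOSONDE_TEMPERATURE: possible 3, used 2; AIRCRAFT_TEMPERATURE: 1, 1
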
+ def possible_vs_used_by_time(df):
+     """
+     Calculate the count of possible vs. used observations by type and time bin.
+
+     Args:
+         df (pd.DataFrame): The input DataFrame containing observation data.
+             The DataFrame must include:
+
+             - 'type': The observation type.
+             - 'time_bin_midpoint': The midpoint of the time bin.
+             - 'observation': The observation values.
+             - 'DART_quality_control': The quality control flag.
+
+     Returns:
+         pd.DataFrame: A DataFrame with the following columns:
+
+             - 'time_bin_midpoint': The midpoint of the time bin.
+             - 'type': The observation type.
+             - 'possible': The count of all observations in the time bin.
+             - 'used': The count of observations in the time bin that passed quality control checks.
+     """
+     # Count all observations (possible) grouped by time_bin_midpoint and type
+     possible = df.groupby(["time_bin_midpoint", "type"], observed=False)["type"].count()
+     possible.rename("possible", inplace=True)
+
+     # Count used observations (QC=0 or QC=2) grouped by time_bin_midpoint and type
+     used_qcs = (
+         select_used_qcs(df)
+         .groupby(["time_bin_midpoint", "type"], observed=False)["type"]
+         .count()
+     )
+     used = used_qcs.reindex(possible.index, fill_value=0)
+     used.rename("used", inplace=True)
+
+     # Combine possible and used into a single DataFrame
+     return pd.concat([possible, used], axis=1).reset_index()
@@ -0,0 +1,45 @@
+ Metadata-Version: 2.4
+ Name: pydartdiags
+ Version: 0.6.4
+ Summary: Observation Sequence Diagnostics for DART
+ Author-email: Helen Kershaw <hkershaw@ucar.edu>
+ License-Expression: Apache-2.0
+ Project-URL: Homepage, https://github.com/NCAR/pyDARTdiags.git
+ Project-URL: Issues, https://github.com/NCAR/pyDARTdiags/issues
+ Project-URL: Documentation, https://ncar.github.io/pyDARTdiags
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: pandas<3.0.0,>=2.2.0
+ Requires-Dist: numpy>=1.26
+ Requires-Dist: plotly>=5.22.0
+ Requires-Dist: pyyaml>=6.0.2
+ Requires-Dist: matplotlib>=3.9.4
+ Dynamic: license-file
+
+ [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+ [![codecov](https://codecov.io/gh/NCAR/pyDARTdiags/graph/badge.svg?token=VK55SQZSVD)](https://codecov.io/gh/NCAR/pyDARTdiags)
+ [![PyPI version](https://badge.fury.io/py/pydartdiags.svg)](https://pypi.org/project/pydartdiags/)
+ [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+
+ # pyDARTdiags
+
+ pyDARTdiags is a Python library for observation space diagnostics for the Data Assimilation Research Testbed ([DART](https://github.com/NCAR/DART)).
+
+ pyDARTdiags is under initial development, so please use caution.
+ The MATLAB [observation space diagnostics](https://docs.dart.ucar.edu/en/latest/guide/matlab-observation-space.html) are available through [DART](https://github.com/NCAR/DART).
+
+ pyDARTdiags can be installed through pip: https://pypi.org/project/pydartdiags/
+ Documentation: https://ncar.github.io/pyDARTdiags/
+
+ ## Contributing
+
+ Contributions are welcome! If you have a feature request, bug report, or a suggestion, please open an issue on our GitHub repository.
+ Please read our [Contributors Guide](https://github.com/NCAR/pyDARTdiags/blob/main/CONTRIBUTING.md) if you would like to contribute to
+ pyDARTdiags.
+
+ ## License
+
+ pyDARTdiags is released under the Apache License 2.0. For more details, see the LICENSE file in the root directory of this source tree or visit [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0).
@@ -0,0 +1,16 @@
+ pydartdiags/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydartdiags/data.py,sha256=Fg5uqO_Pb6HOmIrfLONsvrzHWaJTCVK9HkyfnRNukUQ,6743
+ pydartdiags/matplots/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydartdiags/matplots/matplots.py,sha256=lqt2cdDLnZfBUuzzeAr9n-u1TOMwFsZG-uaVdc-3fBY,16981
+ pydartdiags/obs_sequence/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydartdiags/obs_sequence/composite_types.yaml,sha256=PVLMU6x6KcVMCwPB-U65C_e0YQUemfqUhYMpf1DhFOY,917
+ pydartdiags/obs_sequence/obs_sequence.py,sha256=F9oMHhhvcnARr7YvT_pbJY7GwiSItnxnBKBHsN71APk,53085
+ pydartdiags/plots/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydartdiags/plots/plots.py,sha256=U7WQjE_qN-5a8-85D-PkkgILSFBzTJQ1mcGBa7l5DHI,6464
+ pydartdiags/stats/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydartdiags/stats/stats.py,sha256=QQ13UKCJFaVemEgriTLNC3xptqjE3U3J9WFKIUJeuck,20214
+ pydartdiags-0.6.4.dist-info/licenses/LICENSE,sha256=ROglds_Eg_ylXp-1MHmEawDqMw_UsCB4r9sk7z9PU9M,11377
+ pydartdiags-0.6.4.dist-info/METADATA,sha256=M0QG9pWLX55ag6Plxxk9TNGiltX5sghbGqDx8BqIG44,2255
+ pydartdiags-0.6.4.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+ pydartdiags-0.6.4.dist-info/top_level.txt,sha256=LfMoPLnSd0VhhlWev1eeX9t6AzvyASOloag0LO_ppWg,12
+ pydartdiags-0.6.4.dist-info/RECORD,,
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.10.1)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+