pyIntensityFeatures 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyIntensityFeatures/__init__.py +30 -0
- pyIntensityFeatures/_main.py +500 -0
- pyIntensityFeatures/instruments/__init__.py +9 -0
- pyIntensityFeatures/instruments/satellites.py +137 -0
- pyIntensityFeatures/proc/__init__.py +10 -0
- pyIntensityFeatures/proc/boundaries.py +420 -0
- pyIntensityFeatures/proc/fitting.py +374 -0
- pyIntensityFeatures/proc/intensity.py +251 -0
- pyIntensityFeatures/tests/__init__.py +1 -0
- pyIntensityFeatures/tests/test_instruments_satellites.py +210 -0
- pyIntensityFeatures/tests/test_main.py +734 -0
- pyIntensityFeatures/tests/test_proc_boundaries.py +613 -0
- pyIntensityFeatures/tests/test_proc_fitting.py +218 -0
- pyIntensityFeatures/tests/test_proc_intensity.py +205 -0
- pyIntensityFeatures/tests/test_utils_checks.py +933 -0
- pyIntensityFeatures/tests/test_utils_coords.py +197 -0
- pyIntensityFeatures/tests/test_utils_distributions.py +236 -0
- pyIntensityFeatures/tests/test_utils_grids.py +189 -0
- pyIntensityFeatures/tests/test_utils_output.py +433 -0
- pyIntensityFeatures/utils/__init__.py +13 -0
- pyIntensityFeatures/utils/checks.py +420 -0
- pyIntensityFeatures/utils/coords.py +157 -0
- pyIntensityFeatures/utils/distributions.py +199 -0
- pyIntensityFeatures/utils/grids.py +113 -0
- pyIntensityFeatures/utils/output.py +276 -0
- pyintensityfeatures-0.1.0.dist-info/METADATA +360 -0
- pyintensityfeatures-0.1.0.dist-info/RECORD +30 -0
- pyintensityfeatures-0.1.0.dist-info/WHEEL +5 -0
- pyintensityfeatures-0.1.0.dist-info/licenses/LICENSE +28 -0
- pyintensityfeatures-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,433 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
# Full license can be found in License.md
|
|
4
|
+
#
|
|
5
|
+
# DISTRIBUTION STATEMENT A: Approved for public release. Distribution is
|
|
6
|
+
# unlimited.
|
|
7
|
+
# -----------------------------------------------------------------------------
|
|
8
|
+
"""Tests for functions in `utils.output`."""
|
|
9
|
+
|
|
10
|
+
import datetime as dt
|
|
11
|
+
import numpy as np
|
|
12
|
+
import unittest
|
|
13
|
+
|
|
14
|
+
from pyIntensityFeatures.utils import output
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class TestBoundaryDictFuncs(unittest.TestCase):
    """Tests for functions that create and alter the boundary dicts."""

    def setUp(self):
        """Set up the test runs."""
        # Shared test state: the latitude dimension name, optional
        # attribute/coordinate inputs, the dicts under test, the most
        # recent synthetic data sample, and the output Dataset.
        self.lat_dim = 'mlat'
        self.attr_dict = None
        self.opt_coords = None
        self.coord_dict = None
        self.data_dict = None
        self.new_data = None
        self.dataset = None
        return

    def tearDown(self):
        """Tear down the test environment."""
        del self.opt_coords, self.coord_dict, self.data_dict, self.new_data
        del self.lat_dim, self.attr_dict, self.dataset
        return

    def eval_coord_dict(self):
        """Evaluate the coordinate dict against `opt_coords` and `new_data`."""
        # The coordinate keys that `init_boundary_dicts` always creates
        coord_keys = ['sweep_start', 'sweep_end', 'mlt']

        if self.opt_coords is not None:
            coord_keys.extend(list(self.opt_coords.keys()))

        # Evaluate the presence of the desired keys and their data
        for key in coord_keys:
            self.assertIn(key, self.coord_dict.keys(),
                          msg="missing coordinate key: {:}".format(key))

            if self.opt_coords is not None:
                # Optional coordinates are stored unchanged
                if key in self.opt_coords.keys():
                    self.assertEqual(self.coord_dict[key], self.opt_coords[key])
            else:
                if self.new_data is None:
                    # Freshly initialized dicts: 'mlt' is unset, the sweep
                    # time coordinates are empty lists
                    if key == 'mlt':
                        self.assertIsNone(self.coord_dict[key])
                    else:
                        self.assertListEqual(self.coord_dict[key], [])
                else:
                    if key.find('sweep') == 0:
                        # New times should be added to the end
                        self.assertEqual(self.new_data[key],
                                         self.coord_dict[key][-1])
                    else:
                        # Other coordinates should not change
                        msg = "Unexpected coordinate data in {:}".format(key)
                        self.assertListEqual(list(self.new_data[key]),
                                             list(self.coord_dict[key]),
                                             msg=msg)
        return

    def eval_data_dict(self):
        """Evaluate the data dict against `new_data`."""
        # The data keys and dimension names `init_boundary_dicts` creates
        data_keys = ['mlat', 'eq_bounds', 'eq_uncert', 'po_bounds',
                     'po_uncert', 'eq_params', 'po_params', 'mean_intensity',
                     'std_intensity', 'num_intensity']
        data_dims = ['sweep_start', 'lat', 'mlt', 'coeff']

        # Evaluate the presence of the desired keys and their data
        for key in data_keys:
            self.assertIn(key, self.data_dict.keys(),
                          msg="missing data key: {:}".format(key))

            if self.new_data is None:
                # Test the data dimensions: each entry is a
                # (dim-name tuple, value list) pair
                self.assertTrue(isinstance(self.data_dict[key][0], tuple))
                for dim in self.data_dict[key][0]:
                    self.assertIn(dim, data_dims)

                # Test the data values; no updates means an empty list
                self.assertListEqual(self.data_dict[key][1], [])
            else:
                # Test the data dimensions
                self.assertTrue(isinstance(self.data_dict[key][0], tuple))
                for dim in self.data_dict[key][0]:
                    self.assertIn(dim, data_dims)

                # Test the data values; the latest update is appended last
                self.assertEqual(len(self.new_data[key]),
                                 len(self.data_dict[key][1][-1]),
                                 msg="Updated {:} is too short: {:}".format(
                                     key, self.data_dict[key]))
                self.assertTrue(np.all(self.data_dict[key][1][-1]
                                       == self.new_data[key]),
                                msg="{:} data arrays not equal".format(key))
        return

    def eval_dataset(self):
        """Evaluate the Dataset assigned to the `dataset` attribute."""
        # Evaluate global attributes
        if self.attr_dict is None:
            self.assertDictEqual(self.dataset.attrs, {})
        else:
            # Test access through attribute dict
            self.assertDictEqual(self.dataset.attrs, self.attr_dict)

            # Test access as attributes
            for attr in self.attr_dict:
                self.assertTrue(hasattr(self.dataset, attr))

        # Evaluate the coordinates
        for coord in self.coord_dict.keys():
            self.assertIn(coord, self.dataset.coords)

            if coord in self.dataset.dims:
                # Dimension coordinates (excluding sweep times) must match
                # the stored coordinate values element-by-element
                if coord.find('sweep') < 0:
                    self.assertTrue(
                        np.all(self.dataset.coords[coord].values
                               == self.coord_dict[coord]),
                        msg="".join(["Bad coordinate values for ", coord,
                                     ": {:} != {:}".format(
                                         self.dataset.coords[coord].values,
                                         self.coord_dict[coord])]))
            else:
                self.assertEqual(self.dataset.coords[coord],
                                 self.coord_dict[coord],
                                 msg="unequal coordinate values for {:}".format(
                                     coord))

        # Evaluate the data
        for dvar in self.data_dict.keys():
            self.assertIn(dvar, self.dataset.data_vars)
            self.assertTrue(np.all(self.dataset[dvar].values
                                   == self.data_dict[dvar][1][0]))

        return

    def update_new_data(self, inc=0, hemi=1, mlt_inc=0.5):
        """Create data for the `new_data` test attribute.

        Parameters
        ----------
        inc : int
            Number by which values will be incremented (default=0).
        hemi : int
            1 for Northern and -1 for Southern hemisphere (default=1)
        mlt_inc : float
            Increment for the MLT bins (default=0.5)

        """
        # Build synthetic MLT/latitude grids and Gaussian-style coefficients
        mlt = np.arange(0, 24, mlt_inc)
        mlat = hemi * np.arange(59.0, 90.0, 1.0)
        params = [1.0, 0.1, 0.01, 100.0, hemi * 70.0, 5.0]
        self.new_data = {'sweep_start': dt.datetime(1999, 2, 11, inc),
                         'sweep_end': dt.datetime(1999, 2, 11, inc, 50),
                         'mlt': mlt, 'mlat': mlat,
                         'eq_bounds': np.full(shape=mlt.shape,
                                              fill_value=60.0 + inc) * hemi,
                         'eq_uncert': np.ones(shape=mlt.shape),
                         'eq_params': np.full(shape=(mlt.shape[0], len(params)),
                                              fill_value=params),
                         'po_bounds': np.full(shape=mlt.shape,
                                              fill_value=80.0 + inc) * hemi,
                         'po_uncert': np.ones(shape=mlt.shape),
                         'po_params': np.full(shape=(mlt.shape[0], len(params)),
                                              fill_value=params),
                         'mean_intensity': np.ones(shape=(mlat.shape[0],
                                                          mlt.shape[0])),
                         'std_intensity': np.zeros(shape=(mlat.shape[0],
                                                          mlt.shape[0])),
                         'num_intensity': np.ones(shape=(mlat.shape[0],
                                                         mlt.shape[0]))}
        return

    def test_init_boundary_dicts(self):
        """Test success for initializing the boundary dicts."""
        # Update with and without optional coordinates
        for self.opt_coords in [None, {'test': 'test_value'}]:
            with self.subTest(opt_coords=self.opt_coords):
                # Initialize the dicts
                self.coord_dict, self.data_dict = output.init_boundary_dicts(
                    opt_coords=self.opt_coords)

                # Test the output
                self.eval_coord_dict()
                self.eval_data_dict()
        return

    def test_update_boundary_dicts(self):
        """Test success for updating the boundary dicts."""
        # Update with and without any data
        for num_update in [0, 1, 2]:
            with self.subTest(num_update=num_update):
                # Initialize the dicts
                self.coord_dict, self.data_dict = output.init_boundary_dicts(
                    opt_coords=self.opt_coords)

                for inc in range(num_update):
                    self.update_new_data(inc=inc)

                    # Update the dicts
                    output.update_boundary_dicts(self.new_data, self.coord_dict,
                                                 self.data_dict)

                    # Test the updated dicts
                    self.eval_coord_dict()
                    self.eval_data_dict()
        return

    def test_update_boundary_dicts_change_mlt_inc(self):
        """Test raises ValueError for updating with different MLT bin inc."""
        # Initialize the dicts
        self.coord_dict, self.data_dict = output.init_boundary_dicts(
            opt_coords=self.opt_coords)

        # Add data
        self.update_new_data()
        output.update_boundary_dicts(self.new_data, self.coord_dict,
                                     self.data_dict)

        # Change the MLT bins in the new data
        self.update_new_data(mlt_inc=5.0)

        # Update the dicts and evaluate the error
        self.assertRaisesRegex(ValueError,
                               'change in magnetic local time bin increment',
                               output.update_boundary_dicts,
                               *[self.new_data, self.coord_dict,
                                 self.data_dict])
        return

    def test_update_boundary_dicts_change_mlt_vals(self):
        """Test raises ValueError for updating with different MLT bin vals."""
        # Initialize the dicts
        self.coord_dict, self.data_dict = output.init_boundary_dicts(
            opt_coords=self.opt_coords)

        # Add data
        self.update_new_data()
        output.update_boundary_dicts(self.new_data, self.coord_dict,
                                     self.data_dict)

        # Change the MLT bins in the new data, keeping the same increment
        self.update_new_data()
        self.new_data['mlt'][0] += 0.01

        # Update the dicts and evaluate the error
        self.assertRaisesRegex(ValueError,
                               'change in magnetic local time bin values',
                               output.update_boundary_dicts,
                               *[self.new_data, self.coord_dict,
                                 self.data_dict])
        return

    def test_reshape_lat_coeff_data_no_lat_bins(self):
        """Test ValueError raised with no lat bins when reshaping data."""
        # Initialize the dicts
        self.coord_dict, self.data_dict = output.init_boundary_dicts()

        # Run with the empty dicts and evaluate the error raised
        self.assertRaisesRegex(ValueError, "no latitude data",
                               output.reshape_lat_coeff_data,
                               *[self.coord_dict, self.data_dict, 0])
        return

    def test_reshape_lat_coeff_data_bad_shaped_lat_bins(self):
        """Test ValueError with badly formed lat bins when reshaping data."""
        # Initialize the dicts
        self.coord_dict, self.data_dict = output.init_boundary_dicts()

        # Update the dicts and reset the magnetic latitude bins
        self.update_new_data()
        output.update_boundary_dicts(self.new_data, self.coord_dict,
                                     self.data_dict)
        # Corrupt the first latitude bin so the bins are no longer regular
        self.data_dict['mlat'][1][0][0] -= 0.1

        # Run and evaluate the error raised
        self.assertRaisesRegex(ValueError, "badly shaped latitude bins",
                               output.reshape_lat_coeff_data,
                               *[self.coord_dict, self.data_dict, 6])
        return

    def test_reshape_lat_coeff_data_inconsistent_lat_bins(self):
        """Test ValueError with inconsistent lat bins when reshaping data."""
        # Initialize the dicts
        self.coord_dict, self.data_dict = output.init_boundary_dicts()

        # Update the dicts twice and reset the magnetic latitude bins of
        # the second sweep to use a different latitude increment
        self.update_new_data(inc=0)
        output.update_boundary_dicts(self.new_data, self.coord_dict,
                                     self.data_dict)
        self.update_new_data(inc=1)
        output.update_boundary_dicts(self.new_data, self.coord_dict,
                                     self.data_dict)
        self.data_dict['mlat'][1][-1] = np.arange(59.0, 90.0, 2.0)

        # Run and evaluate the error raised
        self.assertRaisesRegex(ValueError, "inconsistent latitude increments",
                               output.reshape_lat_coeff_data,
                               *[self.coord_dict, self.data_dict, 6])
        return

    def test_reshape_lat_coeff_data_bad_coeff_order(self):
        """Test ValueError with badly ordered coeff data when reshaping data."""
        # Initialize the dicts
        self.coord_dict, self.data_dict = output.init_boundary_dicts()

        # Update the dicts and reset the coefficient data, transposing the
        # values and swapping the last two dimension labels to mismatch
        # the expected (time, mlt, coeff) ordering
        self.update_new_data()
        output.update_boundary_dicts(self.new_data, self.coord_dict,
                                     self.data_dict)
        self.data_dict['po_params'] = list(self.data_dict['po_params'])
        self.data_dict['po_params'][1][0] = self.data_dict['po_params'][1][
            0].transpose()
        self.data_dict['po_params'][0] = (self.data_dict['po_params'][0][0],
                                          self.data_dict['po_params'][0][2],
                                          self.data_dict['po_params'][0][1])
        self.data_dict['po_params'] = tuple(self.data_dict['po_params'])

        # Run and evaluate the error raised
        self.assertRaisesRegex(ValueError,
                               "unexpected dimension order for coefficients",
                               output.reshape_lat_coeff_data,
                               *[self.coord_dict, self.data_dict, 6])
        return

    def test_reshape_lat_coeff_data_bad_coeff_dims(self):
        """Test ValueError with badly shaped coeff dims when reshaping data."""
        # Initialize the dicts
        self.coord_dict, self.data_dict = output.init_boundary_dicts()

        # Update the dicts and reset the coefficient data, dropping one of
        # the three expected dimension labels
        self.update_new_data()
        output.update_boundary_dicts(self.new_data, self.coord_dict,
                                     self.data_dict)
        self.data_dict['po_params'] = list(self.data_dict['po_params'])
        self.data_dict['po_params'][0] = (self.data_dict['po_params'][0][0],
                                          self.data_dict['po_params'][0][2])
        self.data_dict['po_params'] = tuple(self.data_dict['po_params'])

        # Run and evaluate the error raised
        self.assertRaisesRegex(ValueError,
                               "unexpected dimension order for coefficients",
                               output.reshape_lat_coeff_data,
                               *[self.coord_dict, self.data_dict, 6])
        return

    def test_reshape_lat_coeff_data(self):
        """Test success when reshaping data."""

        for hemi in [-1, 1]:
            # Initialize the dicts
            self.coord_dict, self.data_dict = output.init_boundary_dicts()

            with self.subTest(hemisphere=hemi):
                # Update the dicts with two sweeps in the same hemisphere
                self.update_new_data(inc=0, hemi=hemi)
                output.update_boundary_dicts(self.new_data, self.coord_dict,
                                             self.data_dict)
                self.update_new_data(inc=1, hemi=hemi)
                output.update_boundary_dicts(self.new_data, self.coord_dict,
                                             self.data_dict)

                # Run to reshape the output
                mlat_bins, self.data_dict = output.reshape_lat_coeff_data(
                    self.coord_dict, self.data_dict, 6)

                # Evaluate the latitude bins
                self.assertTrue(np.all(mlat_bins == self.new_data['mlat']),
                                msg="unexpected magnetic latitude bins")

                # Evaluate the data output: each value remains a
                # (dims, values) tuple with matching dimensionality
                for key in self.data_dict.keys():
                    self.assertTrue(isinstance(self.data_dict[key], tuple))

                    dims = list(self.data_dict[key][0])
                    self.assertEqual(
                        len(dims), len(np.array(
                            self.data_dict[key][1]).shape),
                        msg="Unexpected shape for {:}".format(key))
        return

    def test_convert_boundary_dict_empty(self):
        """Test creation of an empty Dataset without full coordinates."""
        # Initialize the dicts
        self.coord_dict, self.data_dict = output.init_boundary_dicts()

        # Create a dataset
        self.dataset = output.convert_boundary_dict(self.coord_dict,
                                                    self.data_dict, 0)

        # Evaluate the empty dataset
        self.assertEqual(len(self.dataset.dims), 0)
        self.assertEqual(len(self.dataset.data_vars), 0)
        return

    def test_convert_boundary_dict(self):
        """Test creation of a Dataset from the boundary dicts."""
        # Cycle through the coordinate options
        for self.opt_coords in [None, {'test': 'test_value'}]:
            with self.subTest(opt_coords=self.opt_coords):

                # Cycle through the attribute options
                for self.attr_dict in [None, {'test': 'test_attr'}]:
                    with self.subTest(attrs=self.attr_dict):
                        # Initialize the dicts
                        (self.coord_dict,
                         self.data_dict) = output.init_boundary_dicts(
                             opt_coords=self.opt_coords, lat_dim=self.lat_dim)

                        # Update the dicts
                        self.update_new_data()
                        output.update_boundary_dicts(self.new_data,
                                                     self.coord_dict,
                                                     self.data_dict)

                        # Create a dataset
                        self.dataset = output.convert_boundary_dict(
                            self.coord_dict, self.data_dict, 6,
                            lat_dim=self.lat_dim, attr_dict=self.attr_dict)

                        # Evaluate the dataset
                        self.eval_dataset()
        return
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
#
|
|
4
|
+
# DISTRIBUTION STATEMENT A: Approved for public release. Distribution is
|
|
5
|
+
# unlimited.
|
|
6
|
+
# -----------------------------------------------------------------------------
|
|
7
|
+
"""pyIntensityFeatures utilities."""
|
|
8
|
+
|
|
9
|
+
from pyIntensityFeatures.utils import checks # noqa F401
|
|
10
|
+
from pyIntensityFeatures.utils import coords # noqa F401
|
|
11
|
+
from pyIntensityFeatures.utils import distributions # noqa F401
|
|
12
|
+
from pyIntensityFeatures.utils import grids # noqa F401
|
|
13
|
+
from pyIntensityFeatures.utils import output # noqa F401
|