pypromice 1.5.1__tar.gz → 1.5.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pypromice might be problematic.

Files changed (60)
  1. {pypromice-1.5.1/src/pypromice.egg-info → pypromice-1.5.3}/PKG-INFO +3 -2
  2. {pypromice-1.5.1 → pypromice-1.5.3}/setup.py +1 -1
  3. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/L0toL1.py +122 -94
  4. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/L1toL2.py +150 -165
  5. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/L2toL3.py +5 -2
  6. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/resample.py +2 -2
  7. pypromice-1.5.3/src/pypromice/process/wind.py +66 -0
  8. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/tx/tx.py +4 -0
  9. {pypromice-1.5.1 → pypromice-1.5.3/src/pypromice.egg-info}/PKG-INFO +3 -2
  10. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice.egg-info/SOURCES.txt +1 -0
  11. {pypromice-1.5.1 → pypromice-1.5.3}/LICENSE.txt +0 -0
  12. {pypromice-1.5.1 → pypromice-1.5.3}/MANIFEST.in +0 -0
  13. {pypromice-1.5.1 → pypromice-1.5.3}/README.md +0 -0
  14. {pypromice-1.5.1 → pypromice-1.5.3}/setup.cfg +0 -0
  15. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/__init__.py +0 -0
  16. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/get/__init__.py +0 -0
  17. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/get/get.py +0 -0
  18. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/get/get_promice_data.py +0 -0
  19. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/postprocess/__init__.py +0 -0
  20. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/postprocess/bufr_to_csv.py +0 -0
  21. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/postprocess/bufr_utilities.py +0 -0
  22. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/postprocess/create_bufr_files.py +0 -0
  23. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/postprocess/get_bufr.py +0 -0
  24. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/postprocess/make_metadata_csv.py +0 -0
  25. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/postprocess/positions_seed.csv +0 -0
  26. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/postprocess/real_time_utilities.py +0 -0
  27. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/__init__.py +0 -0
  28. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/aws.py +0 -0
  29. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/get_l2.py +0 -0
  30. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/get_l2tol3.py +0 -0
  31. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/join_l2.py +0 -0
  32. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/join_l3.py +0 -0
  33. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/load.py +0 -0
  34. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/utilities.py +0 -0
  35. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/value_clipping.py +0 -0
  36. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/write.py +0 -0
  37. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/qc/__init__.py +0 -0
  38. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/qc/github_data_issues.py +0 -0
  39. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/qc/percentiles/__init__.py +0 -0
  40. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/qc/percentiles/compute_thresholds.py +0 -0
  41. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/qc/percentiles/outlier_detector.py +0 -0
  42. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/qc/percentiles/thresholds.csv +0 -0
  43. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/qc/persistence.py +0 -0
  44. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/resources/__init__.py +0 -0
  45. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/resources/file_attributes.csv +0 -0
  46. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/resources/variable_aliases_GC-Net.csv +0 -0
  47. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/resources/variables.csv +0 -0
  48. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/station_configuration.py +0 -0
  49. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/tx/__init__.py +0 -0
  50. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/tx/get_l0tx.py +0 -0
  51. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/tx/get_msg.py +0 -0
  52. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/tx/payload_formats.csv +0 -0
  53. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/tx/payload_types.csv +0 -0
  54. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/utilities/__init__.py +0 -0
  55. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/utilities/dependency_graph.py +0 -0
  56. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/utilities/git.py +0 -0
  57. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice.egg-info/dependency_links.txt +0 -0
  58. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice.egg-info/entry_points.txt +0 -0
  59. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice.egg-info/requires.txt +0 -0
  60. {pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: pypromice
- Version: 1.5.1
+ Version: 1.5.3
  Summary: PROMICE/GC-Net data processing toolbox
  Home-page: https://github.com/GEUS-Glaciology-and-Climate/pypromice
  Author: GEUS Glaciology and Climate
@@ -34,6 +34,7 @@ Dynamic: description
  Dynamic: description-content-type
  Dynamic: home-page
  Dynamic: keywords
+ Dynamic: license-file
  Dynamic: project-url
  Dynamic: requires-dist
  Dynamic: requires-python
{pypromice-1.5.1 → pypromice-1.5.3}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
  setuptools.setup(
      name="pypromice",
-     version="1.5.1",
+     version="1.5.3",
      author="GEUS Glaciology and Climate",
      description="PROMICE/GC-Net data processing toolbox",
      long_description=long_description,
{pypromice-1.5.1 → pypromice-1.5.3}/src/pypromice/process/L0toL1.py
@@ -7,6 +7,7 @@ import pandas as pd
  import xarray as xr
  import re, logging
  from pypromice.process.value_clipping import clip_values
+ from pypromice.process import wind
  logger = logging.getLogger(__name__)
 
 
@@ -23,12 +24,12 @@ def toL1(L0, vars_df, T_0=273.15, tilt_threshold=-100):
          Air temperature for sonic ranger adjustment
      tilt_threshold : int
          Tilt-o-meter threshold for valid measurements
- 
+ 
      Returns
      -------
      ds : xarray.Dataset
          Level 1 dataset
-     '''
+     '''
      assert(type(L0) == xr.Dataset)
      ds = L0
      ds.attrs['level'] = 'L1'
@@ -48,7 +49,7 @@ def toL1(L0, vars_df, T_0=273.15, tilt_threshold=-100):
      # If we do not want to shift hourly average values back -1 hr, then comment the following line.
      ds = addTimeShift(ds, vars_df)
 
-     if hasattr(ds, 'dsr_eng_coef'):
+     if hasattr(ds, 'dsr_eng_coef'):
          ds['dsr'] = (ds['dsr'] * 10) / ds.attrs['dsr_eng_coef'] # Convert radiation from engineering to physical units
      if hasattr(ds, 'usr_eng_coef'): # TODO add metadata to indicate whether radiometer values are corrected with calibration values or not
          ds['usr'] = (ds['usr'] * 10) / ds.attrs['usr_eng_coef']
@@ -58,10 +59,10 @@ def toL1(L0, vars_df, T_0=273.15, tilt_threshold=-100):
      ds['ulr'] = ((ds['ulr'] * 10) / ds.attrs['ulr_eng_coef']) + 5.67E-8*(ds['t_rad'] + T_0)**4
 
      ds['z_boom_u'] = _reformatArray(ds['z_boom_u']) # Reformat boom height
- 
+ 
      ds['t_u_interp'] = interpTemp(ds['t_u'], vars_df)
-     ds['z_boom_u'] = ds['z_boom_u'] * ((ds['t_u_interp'] + T_0)/T_0)**0.5 # Adjust sonic ranger readings for sensitivity to air temperature
- 
+     ds['z_boom_u'] = ds['z_boom_u'] * ((ds['t_u_interp'] + T_0)/T_0)**0.5 # Adjust sonic ranger readings for sensitivity to air temperature
+ 
      if ds['gps_lat'].dtype.kind == 'O': # Decode and reformat GPS information
          if 'NH' in ds['gps_lat'].dropna(dim='time').values[1]:
              ds = decodeGPS(ds, ['gps_lat','gps_lon','gps_time'])
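The `((t + T_0)/T_0)**0.5` factor above rescales the sonic ranger reading for the temperature dependence of the speed of sound (the sensor assumes 0 °C). A quick numeric check, using a hypothetical air temperature:

```python
import numpy as np

T_0 = 273.15    # K, as in toL1's default
t_air = -20.0   # deg C, hypothetical air temperature

# Speed of sound scales with sqrt(T_kelvin); the ranger assumes 0 deg C,
# so a raw reading is rescaled by sqrt(T_actual / T_assumed).
factor = np.sqrt((t_air + T_0) / T_0)
print(round(factor, 4))   # 0.9627 -> a 2.60 m raw reading becomes ~2.50 m
```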
@@ -73,22 +74,22 @@ def toL1(L0, vars_df, T_0=273.15, tilt_threshold=-100):
          else:
              try:
                  ds = decodeGPS(ds, ['gps_lat','gps_lon','gps_time']) # TODO this is a work around specifically for L0 RAW processing for THU_U. Find a way to make this slicker
- 
+ 
              except:
                  print('Invalid GPS type {ds["gps_lat"].dtype} for decoding')
- 
+ 
      for l in ['gps_lat', 'gps_lon', 'gps_alt','gps_time']:
-         ds[l] = _reformatArray(ds[l])
+         ds[l] = _reformatArray(ds[l])
 
      if hasattr(ds, 'latitude') and hasattr(ds, 'longitude'):
          ds['gps_lat'] = reformatGPS(ds['gps_lat'], ds.attrs['latitude'])
          ds['gps_lon'] = reformatGPS(ds['gps_lon'], ds.attrs['longitude'])
 
      if hasattr(ds, 'logger_type'): # Convert tilt voltage to degrees
-         if ds.attrs['logger_type'].upper() == 'CR1000':
-             ds['tilt_x'] = getTiltDegrees(ds['tilt_x'], tilt_threshold)
-             ds['tilt_y'] = getTiltDegrees(ds['tilt_y'], tilt_threshold)
- 
+         if ds.attrs['logger_type'].upper() == 'CR1000':
+             ds['tilt_x'] = getTiltDegrees(ds['tilt_x'], tilt_threshold)
+             ds['tilt_y'] = getTiltDegrees(ds['tilt_y'], tilt_threshold)
+ 
      if hasattr(ds, 'tilt_y_factor'): # Apply tilt factor (e.g. -1 will invert tilt angle)
          ds['tilt_y'] = ds['tilt_y']*ds.attrs['tilt_y_factor']
 
@@ -97,39 +98,66 @@ def toL1(L0, vars_df, T_0=273.15, tilt_threshold=-100):
      # since we interpolate above in _getTiltDegrees. PJW
      ds['tilt_x'] = smoothTilt(ds['tilt_x'], 7) # Smooth tilt
      ds['tilt_y'] = smoothTilt(ds['tilt_y'], 7)
- 
-     if hasattr(ds, 'bedrock'): # Fix tilt to zero if station is on bedrock
-         if ds.attrs['bedrock']==True or ds.attrs['bedrock'].lower() in 'true':
-             ds.attrs['bedrock'] = True # ensures all AWS objects have a 'bedrock' attribute
-             ds['tilt_x'] = (('time'), np.arange(ds['time'].size)*0)
-             ds['tilt_y'] = (('time'), np.arange(ds['time'].size)*0)
-         else:
-             ds.attrs['bedrock'] = False # ensures all AWS objects have a 'bedrock' attribute
-     else:
-         ds.attrs['bedrock'] = False # ensures all AWS objects have a 'bedrock' attribute
- 
+ 
+     # Apply wind factor if provided
+     # This is for the case of anemometer rotations being improperly translated to wind speed by the logger program
+     if hasattr(ds, 'wind_u_coef'):
+         logger.info(f'Wind speed correction applied to wspd_u based on factor of {ds.attrs["wind_u_coef"]}')
+         ds['wspd_u'] = wind.correct_wind_speed(ds['wspd_u'],
+                                                ds.attrs['wind_u_coef'])
+     if hasattr(ds, 'wind_l_coef'):
+         logger.info(f'Wind speed correction applied to wspd_l based on factor of {ds.attrs["wind_l_coef"]}')
+         ds['wspd_l'] = wind.correct_wind_speed(ds['wspd_l'],
+                                                ds.attrs['wind_l_coef'])
+     if hasattr(ds, 'wind_i_coef'):
+         logger.info(f'Wind speed correction applied to wspd_i based on factor of {ds.attrs["wind_i_coef"]}')
+         ds['wspd_i'] = wind.correct_wind_speed(ds['wspd_i'],
+                                                ds.attrs['wind_i_coef'])
+ 
+     # Handle cases where the bedrock attribute is incorrectly set
+     if not 'bedrock' in ds.attrs:
+         logger.warning('bedrock attribute is not set')
+         ds.attrs['bedrock'] = False
+     elif not isinstance(ds.attrs['bedrock'], bool):
+         logger.warning(f'bedrock attribute is not boolean: {ds.attrs["bedrock"]}')
+         ds.attrs['bedrock'] = str(ds.attrs['bedrock']).lower() == 'true'
+ 
+     is_bedrock = ds.attrs['bedrock']
+ 
+     if is_bedrock:
+         # some bedrock stations (e.g. KAN_B) do not have tilt in L0 files
+         # we need to create them manually
+         for var in ['tilt_x','tilt_y']:
+             if var not in ds.data_vars:
+                 ds[var] = (('time'), np.full(ds['time'].size, np.nan))
+ 
+         # WEG_B has a non-null z_pt even though it is a bedrock station
+         if ~ds['z_pt'].isnull().all(): # Nullify z_pt at bedrock sites
+             ds['z_pt'] = (('time'), np.full(ds['time'].size, np.nan))
+             logger.info('Warning: Non-null data for z_pt at a bedrock site')
+ 
      if ds.attrs['number_of_booms']==1: # 1-boom processing
-         if ~ds['z_pt'].isnull().all(): # Calculate pressure transducer fluid density
+         if ~ds['z_pt'].isnull().all(): # Calculate pressure transducer fluid density
              if hasattr(ds, 'pt_z_offset'): # Apply SR50 stake offset
-                 ds['z_pt'] = ds['z_pt'] + int(ds.attrs['pt_z_offset'])
-             ds['z_pt_cor'],ds['z_pt']=getPressDepth(ds['z_pt'], ds['p_u'],
-                                                     ds.attrs['pt_antifreeze'],
-                                                     ds.attrs['pt_z_factor'],
-                                                     ds.attrs['pt_z_coef'],
-                                                     ds.attrs['pt_z_p_coef'])
-             ds['z_stake'] = _reformatArray(ds['z_stake']) # Reformat boom height
+                 ds['z_pt'] = ds['z_pt'] + int(ds.attrs['pt_z_offset'])
+             ds['z_pt_cor'],ds['z_pt']=getPressDepth(ds['z_pt'], ds['p_u'],
+                                                     ds.attrs['pt_antifreeze'],
+                                                     ds.attrs['pt_z_factor'],
+                                                     ds.attrs['pt_z_coef'],
+                                                     ds.attrs['pt_z_p_coef'])
+             ds['z_stake'] = _reformatArray(ds['z_stake']) # Reformat boom height
              ds['z_stake'] = ds['z_stake'] * ((ds['t_u'] + T_0)/T_0)**0.5 # Adjust sonic ranger readings for sensitivity to air temperature
- 
+ 
      elif ds.attrs['number_of_booms']==2: # 2-boom processing
-         ds['z_boom_l'] = _reformatArray(ds['z_boom_l']) # Reformat boom height
+         ds['z_boom_l'] = _reformatArray(ds['z_boom_l']) # Reformat boom height
          ds['t_l_interp'] = interpTemp(ds['t_l'], vars_df)
-         ds['z_boom_l'] = ds['z_boom_l'] * ((ds['t_l_interp']+ T_0)/T_0)**0.5 # Adjust sonic ranger readings for sensitivity to air temperature
+         ds['z_boom_l'] = ds['z_boom_l'] * ((ds['t_l_interp']+ T_0)/T_0)**0.5 # Adjust sonic ranger readings for sensitivity to air temperature
 
      ds = clip_values(ds, vars_df)
      for key in ['hygroclip_t_offset', 'dsr_eng_coef', 'usr_eng_coef',
-                 'dlr_eng_coef', 'ulr_eng_coef', 'pt_z_coef', 'pt_z_p_coef',
-                 'pt_z_factor', 'pt_antifreeze', 'boom_azimuth', 'nodata',
-                 'conf', 'file']:
+                 'dlr_eng_coef', 'ulr_eng_coef', 'wind_u_coef','wind_l_coef',
+                 'wind_i_coef', 'pt_z_coef', 'pt_z_p_coef', 'pt_z_factor',
+                 'pt_antifreeze', 'boom_azimuth', 'nodata', 'conf', 'file']:
          ds.attrs.pop(key, None)
 
      return ds
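`wind.correct_wind_speed` comes from the new `src/pypromice/process/wind.py` module (file 7 in the list above); its body is not shown in this diff view, only the call signature. Judging from the log message ("based on factor of ..."), a minimal sketch could look like the following, where the plain multiplicative rescaling is an assumption:

```python
# Hypothetical sketch of wind.correct_wind_speed; the actual module in
# pypromice 1.5.3 is 66 lines and is not reproduced in this diff view.
import xarray as xr

def correct_wind_speed(wspd: xr.DataArray, coef: float) -> xr.DataArray:
    """Rescale wind speed by a station-specific factor (assumed behaviour)."""
    attrs = wspd.attrs            # preserve variable metadata
    wspd = wspd * float(coef)     # assumed: plain multiplicative correction
    wspd.attrs = attrs
    return wspd
```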
@@ -220,10 +248,10 @@ def addTimeShift(ds, vars_df):
      # ds_out = xr.Dataset(dict(zip(df_out.columns, vals)), attrs=ds.attrs)
      return ds_out
 
- def getPressDepth(z_pt, p, pt_antifreeze, pt_z_factor, pt_z_coef, pt_z_p_coef):
-     '''Adjust pressure depth and calculate pressure transducer depth based on
+ def getPressDepth(z_pt, p, pt_antifreeze, pt_z_factor, pt_z_coef, pt_z_p_coef):
+     '''Adjust pressure depth and calculate pressure transducer depth based on
      pressure transducer fluid density
- 
+ 
      Parameters
      ----------
      z_pt : xr.Dataarray
@@ -231,7 +259,7 @@ def getPressDepth(z_pt, p, pt_antifreeze, pt_z_factor, pt_z_coef, pt_z_p_coef):
      p : xr.Dataarray
          Air pressure
      pt_antifreeze : float
-         Pressure transducer anti-freeze percentage for fluid density
+         Pressure transducer anti-freeze percentage for fluid density
          correction
      pt_z_factor : float
          Pressure transducer factor
@@ -239,7 +267,7 @@ def getPressDepth(z_pt, p, pt_antifreeze, pt_z_factor, pt_z_coef, pt_z_p_coef):
          Pressure transducer coefficient
      pt_z_p_coef : float
          Pressure transducer coefficient
- 
+ 
      Returns
      -------
      z_pt_cor : xr.Dataarray
@@ -247,8 +275,8 @@ def getPressDepth(z_pt, p, pt_antifreeze, pt_z_factor, pt_z_coef, pt_z_p_coef):
      z_pt : xr.Dataarray
          Pressure transducer depth
      '''
-     # Calculate pressure transducer fluid density
-     if pt_antifreeze == 50: #TODO: Implement function w/ reference (analytical or from LUT)
+     # Calculate pressure transducer fluid density
+     if pt_antifreeze == 50: #TODO: Implement function w/ reference (analytical or from LUT)
          rho_af = 1092 #TODO: Track uncertainty
      elif pt_antifreeze == 100:
          rho_af = 1145
@@ -257,19 +285,19 @@ def getPressDepth(z_pt, p, pt_antifreeze, pt_z_factor, pt_z_coef, pt_z_p_coef):
          logger.info('ERROR: Incorrect metadata: "pt_antifreeze" = ' +
                      f'{pt_antifreeze}. Antifreeze mix only supported at 50% or 100%')
          # assert(False)
- 
+ 
      # Correct pressure depth
      z_pt_cor = z_pt * pt_z_coef * pt_z_factor * 998.0 / rho_af + 100 * (pt_z_p_coef - p) / (rho_af * 9.81)
 
      # Calculate pressure transducer depth
      z_pt = z_pt * pt_z_coef * pt_z_factor * 998.0 / rho_af
- 
+ 
      return z_pt_cor, z_pt
 
 
  def interpTemp(temp, var_configurations, max_interp=pd.Timedelta(12,'h')):
      '''Clip and interpolate temperature dataset for use in corrections
- 
+ 
      Parameters
      ----------
      temp : `xarray.DataArray`
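The depth correction above rescales the reading from the density of water (998.0 kg m⁻³, which the transducer assumes) to the antifreeze-mix density, and adds a barometric term; the factor 100 converts air pressure from hPa to Pa. A worked example with hypothetical coefficients (only the rho_af values come from the code above):

```python
rho_af = 1092.0       # kg m-3, 50% antifreeze mix (from the code above)
z_pt = 5.0            # m, raw transducer reading (hypothetical)
p = 800.0             # hPa, air pressure (hypothetical)
pt_z_coef, pt_z_factor, pt_z_p_coef = 1.0, 1.0, 1000.0   # hypothetical

z_pt_cor = (z_pt * pt_z_coef * pt_z_factor * 998.0 / rho_af
            + 100 * (pt_z_p_coef - p) / (rho_af * 9.81))
print(round(z_pt_cor, 2))   # 6.44: density rescaling (~4.57 m) + barometric term (~1.87 m)
```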
@@ -278,7 +306,7 @@ def interpTemp(temp, var_configurations, max_interp=pd.Timedelta(12,'h')):
          Dataframe to retrieve attribute hi-lo values from for temperature clipping
      max_interp : `pandas.Timedelta`
          Maximum time steps to interpolate across. The default is 12 hours.
- 
+ 
      Returns
      -------
      temp_interp : `xarray.DataArray`
@@ -286,18 +314,18 @@ def interpTemp(temp, var_configurations, max_interp=pd.Timedelta(12,'h')):
      '''
      # Determine if upper or lower temperature array
      var = temp.name.lower()
- 
+ 
      # Find range threshold and use it to clip measurements
      cols = ["lo", "hi", "OOL"]
      assert set(cols) <= set(var_configurations.columns)
      variable_limits = var_configurations[cols].dropna(how="all")
      temp = temp.where(temp >= variable_limits.loc[var,'lo'])
      temp = temp.where(temp <= variable_limits.loc[var, 'hi'])
- 
+ 
      # Drop duplicates and interpolate across NaN values
      # temp_interp = temp.drop_duplicates(dim='time', keep='first')
      temp_interp = temp.interpolate_na(dim='time', max_gap=max_interp)
- 
+ 
      return temp_interp
 
 
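For reference, `interpolate_na` with `max_gap` only fills gaps shorter than the given span; a minimal illustration with synthetic data (all values hypothetical):

```python
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range("2024-01-01", periods=30, freq="h")
temp = xr.DataArray(np.linspace(-20.0, -10.0, 30), dims="time",
                    coords={"time": time})
temp[5:10] = np.nan    # short gap: filled by linear interpolation
temp[15:29] = np.nan   # gap spanning more than 12 h: left as NaN
temp_interp = temp.interpolate_na(dim="time", max_gap=pd.Timedelta(12, "h"))
```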
@@ -309,7 +337,7 @@ def smoothTilt(tilt, win_size):
      In Python, this should be
      dstxy = dstxy.rolling(time=7, win_type='boxcar', center=True).mean()
      But the EDGE_MIRROR makes it a bit more complicated
- 
+ 
      Parameters
      ----------
      tilt : xarray.DataArray
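The docstring above notes that a plain `rolling(..., win_type='boxcar')` misses IDL's EDGE_MIRROR behaviour. One way to approximate mirrored edges is to reflect samples before smoothing; a sketch only, not the actual smoothTilt implementation (which this release does not change):

```python
import numpy as np
import pandas as pd

def boxcar_edge_mirror(series: pd.Series, win: int = 7) -> pd.Series:
    """Boxcar mean with mirrored edges, approximating IDL's EDGE_MIRROR."""
    half = win // 2
    # Reflect `half` samples at each end so every window is full
    padded = np.concatenate([series.values[half:0:-1],
                             series.values,
                             series.values[-2:-2 - half:-1]])
    smoothed = pd.Series(padded).rolling(win, center=True).mean()
    return pd.Series(smoothed.values[half:-half], index=series.index)
```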
@@ -338,9 +366,9 @@ def smoothTilt(tilt, win_size):
      return tdf_rolling
 
  def getTiltDegrees(tilt, threshold):
-     '''Filter tilt with given threshold, and convert from voltage to degrees.
-     Voltage-to-degrees conversion is based on the equation in 3.2.9 at
-     https://essd.copernicus.org/articles/13/3819/2021/#section3
+     '''Filter tilt with given threshold, and convert from voltage to degrees.
+     Voltage-to-degrees conversion is based on the equation in 3.2.9 at
+     https://essd.copernicus.org/articles/13/3819/2021/#section3
 
      Parameters
      ----------
@@ -348,7 +376,7 @@ def getTiltDegrees(tilt, threshold):
          Array (either 'tilt_x' or 'tilt_y'), tilt values (voltage)
      threshold : int
          Values below this threshold (-100) will not be retained.
- 
+ 
      Returns
      -------
      dst.interpolate_na() : xarray.DataArray
@@ -358,7 +386,7 @@ def getTiltDegrees(tilt, threshold):
      notOKtilt = (tilt < threshold)
      OKtilt = (tilt >= threshold)
      tilt = tilt / 10
- 
+ 
      # IDL version:
      # tiltX = tiltX/10.
      # tiltnonzero = where(tiltX ne 0 and tiltX gt -40 and tiltX lt 40)
@@ -366,26 +394,26 @@ def getTiltDegrees(tilt, threshold):
      # tiltY = tiltY/10.
      # tiltnonzero = where(tiltY ne 0 and tiltY gt -40 and tiltY lt 40)
      # if n_elements(tiltnonzero) ne 1 then tiltY[tiltnonzero] = tiltY[tiltnonzero]/abs(tiltY[tiltnonzero])*(-0.49*(abs(tiltY[tiltnonzero]))^4 + 3.6*(abs(tiltY[tiltnonzero]))^3 - 10.4*(abs(tiltY[tiltnonzero]))^2 +21.1*(abs(tiltY[tiltnonzero])))
- 
+ 
      dst = tilt
      nz = (dst != 0) & (np.abs(dst) < 40)
- 
+ 
      dst = dst.where(~nz, other = dst / np.abs(dst)
                      * (-0.49
                      * (np.abs(dst))**4 + 3.6
                      * (np.abs(dst))**3 - 10.4
                      * (np.abs(dst))**2 + 21.1
                      * (np.abs(dst))))
- 
+ 
      # if n_elements(OKtiltX) gt 1 then tiltX[notOKtiltX] = interpol(tiltX[OKtiltX],OKtiltX,notOKtiltX) ; Interpolate over gaps for radiation correction; set to -999 again below.
      dst = dst.where(~notOKtilt)
      return dst.interpolate_na(dim='time', use_coordinate=False) #TODO: Filling w/o considering time gaps to re-create IDL/GDL outputs. Should fill with coordinate not False. Also consider 'max_gap' option?
 
- 
+ 
  def decodeGPS(ds, gps_names):
-     '''Decode GPS information based on names of GPS attributes. This should be
+     '''Decode GPS information based on names of GPS attributes. This should be
      applied if gps information does not consist of float values
- 
+ 
      Parameters
      ----------
      ds : xr.Dataset
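The quartic above implements the voltage-to-degrees equation from section 3.2.9 of the ESSD paper linked in the docstring. Evaluated at a single hypothetical reading (after the division by 10 earlier in the function):

```python
import numpy as np

v = 0.5   # hypothetical tilt value after the `tilt / 10` rescaling above
deg = np.sign(v) * (-0.49 * abs(v)**4 + 3.6 * abs(v)**3
                    - 10.4 * abs(v)**2 + 21.1 * abs(v))
print(round(deg, 2))   # 8.37 (degrees)
```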
@@ -393,63 +421,63 @@ def decodeGPS(ds, gps_names):
      gps_names : list
          Variable names for GPS information, such as "gps_lat", "gps_lon" and
          "gps_alt"
- 
+ 
      Returns
      -------
      ds : xr.Dataset
          Data set with decoded GPS information
      '''
      for v in gps_names:
-         a = ds[v].attrs
+         a = ds[v].attrs
          str2nums = [re.findall(r"[-+]?\d*\.\d+|\d+", _) if isinstance(_, str) else [np.nan] for _ in ds[v].values]
          ds[v][:] = pd.DataFrame(str2nums).astype(float).T.values[0]
          ds[v] = ds[v].astype(float)
-         ds[v].attrs = a
+         ds[v].attrs = a
      return ds
 
  def reformatGPS(pos_arr, attrs):
      '''Correct latitude and longitude from native format to decimal degrees.
- 
+ 
      v2 stations should send "NH6429.01544","WH04932.86061" (NUK_L 2022)
      v3 stations should send coordinates as "6628.93936","04617.59187" (DY2) or 6430,4916 (NUK_Uv3)
      decodeGPS should have decoded these strings to floats in ddmm.mmmm format
      v1 stations however only saved decimal minutes (mm.mmmmm) as float<=60.
-     In this case, we use the integer part of the latitude given in the config
+     In this case, we use the integer part of the latitude given in the config
      file and append the gps value after it.
- 
+ 
      Parameters
      ----------
      pos_arr : xr.Dataarray
          Array of latitude or longitude measured by the GPS
      attrs : dict
-         The global attribute 'latitude' or 'longitude' associated with the
-         file being processed. It is the standard latitude/longitude given in the
+         The global attribute 'latitude' or 'longitude' associated with the
+         file being processed. It is the standard latitude/longitude given in the
          config file for that station.
- 
+ 
      Returns
      -------
      pos_arr : xr.Dataarray
          Formatted GPS position array in decimal degree
-     '''
-     if np.any((pos_arr <= 90) & (pos_arr > 0)):
-         # then pos_arr is in decimal minutes, so we add to it the integer
+     '''
+     if np.any((pos_arr <= 90) & (pos_arr > 0)):
+         # then pos_arr is in decimal minutes, so we add to it the integer
          # part of the latitude given in the config file x100
          # so that it reads ddmm.mmmmmm like for v2 and v3 files
          # Note that np.sign and np.abs handle negative longitudes.
          pos_arr = np.sign(attrs) * (pos_arr + 100*np.floor(np.abs(attrs)))
-     a = pos_arr.attrs
+     a = pos_arr.attrs
      pos_arr = np.floor(pos_arr / 100) + (pos_arr / 100 - np.floor(pos_arr / 100)) * 100 / 60
-     pos_arr.attrs = a
-     return pos_arr
+     pos_arr.attrs = a
+     return pos_arr
 
  def _reformatArray(ds_arr):
      '''Reformat DataArray values and attributes
- 
+ 
      Parameters
      ----------
      ds_arr : xr.Dataarray
          Data array
- 
+ 
      Returns
      -------
      ds_arr : xr.Dataarray
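The conversion splits a ddmm.mmmm value into whole degrees and decimal minutes, then divides the minutes by 60. Using the DY2 latitude example from the docstring:

```python
import numpy as np

pos = 6628.93936   # ddmm.mmmm, the DY2 example from the docstring above
dd = np.floor(pos / 100) + (pos / 100 - np.floor(pos / 100)) * 100 / 60
print(round(float(dd), 5))   # 66.48232 -> 66 degrees + 28.93936 minutes / 60
```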
@@ -458,18 +486,18 @@ def _reformatArray(ds_arr):
      a = ds_arr.attrs # Store
      ds_arr.values = pd.to_numeric(ds_arr, errors='coerce')
      ds_arr.attrs = a # Reformat
-     return ds_arr
+     return ds_arr
 
  def _removeVars(ds, v_names):
      '''Remove redundant variables if present in dataset
- 
+ 
      Parameters
      ----------
      ds : xr.Dataset
          Data set
      v_names : list
          List of column names to drop
- 
+ 
      Returns
      -------
      ds : xr.Dataset
@@ -481,7 +509,7 @@ def _removeVars(ds, v_names):
 
  def _popCols(ds, booms, data_type, vars_df, cols):
      '''Populate data array columns with given variable names from look-up table
- 
+ 
      Parameters
      ----------
      ds : xr.Dataset
@@ -494,7 +522,7 @@ def _popCols(ds, booms, data_type, vars_df, cols):
          Variables lookup table
      cols : list
          Names of columns to populate
- 
+ 
      Returns
      -------
      ds : xr.Dataset
@@ -505,10 +533,10 @@ def _popCols(ds, booms, data_type, vars_df, cols):
 
      elif booms==2:
          names = vars_df.loc[(vars_df[cols[0]]!='one-boom')]
- 
+ 
      for v in list(names.index):
          if v not in list(ds.variables):
-             ds[v] = (('time'), np.arange(ds['time'].size)*np.nan)
+             ds[v] = (('time'), np.arange(ds['time'].size)*np.nan)
      return ds
  # def _popCols(ds, booms, data_type, vars_df, cols):
  #     if booms==1:
@@ -517,20 +545,20 @@ def _popCols(ds, booms, data_type, vars_df, cols):
  #         names = vars_df.loc[(vars_df[cols[0]]!='two-boom')]
  #     else:
  #         names = vars_df.loc[(vars_df[cols[0]] != 'two-boom') & vars_df[cols[1]] != 'tx']
- 
+ 
  #     elif booms==2:
  #         if data_type !='TX':
  #             names = vars_df.loc[(vars_df[cols[0]]!='two-boom')]
  #         else:
  #             names = vars_df.loc[(vars_df[cols[0]] != 'two-boom') & vars_df[cols[1]] != 'tx']
- 
+ 
  #     for v in list(names.index):
  #         if v not in list(ds.variables):
- #             ds[v] = (('time'), np.arange(ds['time'].size)*np.nan)
+ #             ds[v] = (('time'), np.arange(ds['time'].size)*np.nan)
  #     return ds
 
  #------------------------------------------------------------------------------
 
- if __name__ == "__main__":
-     # unittest.main()
-     pass
+ if __name__ == "__main__":
+     # unittest.main()
+     pass