pypromice 1.4.3__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -13,7 +13,7 @@ import logging
 
 logger = logging.getLogger(__name__)
 
-def toL3(L2,
+def toL3(L2,
          data_adjustments_dir: Path,
          station_config={},
          T_0=273.15):
@@ -23,46 +23,46 @@ def toL3(L2,
     - smoothed and inter/extrapolated GPS coordinates
     - continuous surface height, ice surface height, snow height
     - thermistor depths
-
-
+
+
     Parameters
     ----------
     L2 : xarray:Dataset
         L2 AWS data
     station_config : Dict
-        Dictionary containing the information necessary for the processing of
+        Dictionary containing the information necessary for the processing of
         L3 variables (relocation dates for coordinates processing, or thermistor
         string maintenance date for the thermistors depth)
-    T_0 : int
+    T_0 : int
         Freezing point temperature. Default is 273.15.
     '''
     ds = L2
     ds.attrs['level'] = 'L3'
 
-    T_100 = T_0+100 # Get steam point temperature as K
-
+    T_100 = T_0+100 # Get steam point temperature as K
+
     # Turbulent heat flux calculation
     if ('t_u' in ds.keys()) and \
        ('p_u' in ds.keys()) and \
-       ('rh_u_cor' in ds.keys()):
+       ('rh_u_wrt_ice_or_water' in ds.keys()):
         # Upper boom bulk calculation
         T_h_u = ds['t_u'].copy() # Copy for processing
         p_h_u = ds['p_u'].copy()
-        RH_cor_h_u = ds['rh_u_cor'].copy()
-
-        q_h_u = calculate_specific_humidity(T_0, T_100, T_h_u, p_h_u, RH_cor_h_u) # Calculate specific humidity
+        rh_h_u_wrt_ice_or_water = ds['rh_u_wrt_ice_or_water'].copy()
+
+        q_h_u = calculate_specific_humidity(T_0, T_100, T_h_u, p_h_u, rh_h_u_wrt_ice_or_water) # Calculate specific humidity
         if ('wspd_u' in ds.keys()) and \
            ('t_surf' in ds.keys()) and \
            ('z_boom_u' in ds.keys()):
             WS_h_u = ds['wspd_u'].copy()
             Tsurf_h = ds['t_surf'].copy() # T surf from derived upper boom product. TODO is this okay to use with lower boom parameters?
-            z_WS_u = ds['z_boom_u'].copy() + 0.4 # Get height of Anemometer
-            z_T_u = ds['z_boom_u'].copy() - 0.1 # Get height of thermometer
-
-            if not ds.attrs['bedrock']:
+            z_WS_u = ds['z_boom_u'].copy() + 0.4 # Get height of Anemometer
+            z_T_u = ds['z_boom_u'].copy() - 0.1 # Get height of thermometer
+
+            if not ds.attrs['bedrock']:
                 SHF_h_u, LHF_h_u= calculate_tubulent_heat_fluxes(T_0, T_h_u, Tsurf_h, WS_h_u, # Calculate latent and sensible heat fluxes
-                                                                 z_WS_u, z_T_u, q_h_u, p_h_u)
-
+                                                                 z_WS_u, z_T_u, q_h_u, p_h_u)
+
                 ds['dshf_u'] = (('time'), SHF_h_u.data)
                 ds['dlhf_u'] = (('time'), LHF_h_u.data)
             else:
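The substantive change in 1.5.0, visible in this hunk and repeated through the rest of the file, is the rename of the corrected relative-humidity variables: rh_u_cor becomes rh_u_wrt_ice_or_water and rh_l_cor becomes rh_l_wrt_ice_or_water (nearly everything else is trailing-whitespace cleanup). A minimal compatibility sketch for downstream code that must read L2 files produced by either version; the helper pick_humidity_var is hypothetical, not part of pypromice:

    import numpy as np
    import xarray as xr

    def pick_humidity_var(ds: xr.Dataset, boom: str = "u") -> str:
        # pypromice <= 1.4.3 wrote 'rh_u_cor'; >= 1.5.0 writes 'rh_u_wrt_ice_or_water'
        for name in (f"rh_{boom}_wrt_ice_or_water", f"rh_{boom}_cor"):
            if name in ds.data_vars:
                return name
        raise KeyError(f"no corrected relative humidity found for boom '{boom}'")

    # toy dataset mimicking a 1.5.0 L2 product
    ds = xr.Dataset({"rh_u_wrt_ice_or_water": ("time", np.array([85.0, 90.0]))})
    print(pick_humidity_var(ds))  # -> rh_u_wrt_ice_or_water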
@@ -71,80 +71,80 @@ def toL3(L2,
         q_h_u = 1000 * q_h_u # Convert sp.humid from kg/kg to g/kg
         ds['qh_u'] = (('time'), q_h_u.data)
     else:
-        logger.info('t_u, p_u or rh_u_cor missing, cannot calulate tubrulent heat fluxes')
+        logger.info('t_u, p_u or rh_u_wrt_ice_or_water missing, cannot calulate tubrulent heat fluxes')
 
     # Lower boom bulk calculation
     if ds.attrs['number_of_booms']==2:
         if ('t_l' in ds.keys()) and \
            ('p_l' in ds.keys()) and \
-           ('rh_l_cor' in ds.keys()):
+           ('rh_l_wrt_ice_or_water' in ds.keys()):
             T_h_l = ds['t_l'].copy() # Copy for processing
-            p_h_l = ds['p_l'].copy()
-            RH_cor_h_l = ds['rh_l_cor'].copy()
+            p_h_l = ds['p_l'].copy()
+            rh_h_l_wrt_ice_or_water = ds['rh_l_wrt_ice_or_water'].copy()
 
-            q_h_l = calculate_specific_humidity(T_0, T_100, T_h_l, p_h_l, RH_cor_h_l) # Calculate sp.humidity
+            q_h_l = calculate_specific_humidity(T_0, T_100, T_h_l, p_h_l, rh_h_l_wrt_ice_or_water) # Calculate sp.humidity
 
             if ('wspd_l' in ds.keys()) and \
                ('t_surf' in ds.keys()) and \
                ('z_boom_l' in ds.keys()):
-                z_WS_l = ds['z_boom_l'].copy() + 0.4 # Get height of W
-                z_T_l = ds['z_boom_l'].copy() - 0.1 # Get height of thermometer
-                WS_h_l = ds['wspd_l'].copy()
-                if not ds.attrs['bedrock']:
-                    SHF_h_l, LHF_h_l= calculate_tubulent_heat_fluxes(T_0, T_h_l, Tsurf_h, WS_h_l, # Calculate latent and sensible heat fluxes
-                                                                     z_WS_l, z_T_l, q_h_l, p_h_l)
-
+                z_WS_l = ds['z_boom_l'].copy() + 0.4 # Get height of W
+                z_T_l = ds['z_boom_l'].copy() - 0.1 # Get height of thermometer
+                WS_h_l = ds['wspd_l'].copy()
+                if not ds.attrs['bedrock']:
+                    SHF_h_l, LHF_h_l= calculate_tubulent_heat_fluxes(T_0, T_h_l, Tsurf_h, WS_h_l, # Calculate latent and sensible heat fluxes
+                                                                     z_WS_l, z_T_l, q_h_l, p_h_l)
+
                     ds['dshf_l'] = (('time'), SHF_h_l.data)
                     ds['dlhf_l'] = (('time'), LHF_h_l.data)
                 else:
                     logger.info('wspd_l, t_surf or z_boom_l missing, cannot calulate tubrulent heat fluxes')
-
+
                 q_h_l = 1000 * q_h_l # Convert sp.humid from kg/kg to g/kg
                 ds['qh_l'] = (('time'), q_h_l.data)
         else:
-            logger.info('t_l, p_l or rh_l_cor missing, cannot calulate tubrulent heat fluxes')
+            logger.info('t_l, p_l or rh_l_wrt_ice_or_water missing, cannot calulate tubrulent heat fluxes')
 
     if len(station_config)==0:
         logger.warning('\n***\nThe station configuration file is missing or improperly passed to pypromice. Some processing steps might fail.\n***\n')
-
-    # Smoothing and inter/extrapolation of GPS coordinates
+
+    # Smoothing and inter/extrapolation of GPS coordinates
     for var in ['gps_lat', 'gps_lon', 'gps_alt']:
         ds[var.replace('gps_','')] = ('time', gps_coordinate_postprocessing(ds, var, station_config))
-
+
     # processing continuous surface height, ice surface height, snow height
     try:
         ds = process_surface_height(ds, data_adjustments_dir, station_config)
     except Exception as e:
         logger.error("Error processing surface height at %s"%L2.attrs['station_id'])
         logging.error(e, exc_info=True)
-
+
     # making sure dataset has the attributes contained in the config files
     if 'project' in station_config.keys():
         ds.attrs['project'] = station_config['project']
     else:
         logger.error('No project info in station_config. Using \"PROMICE\".')
         ds.attrs['project'] = "PROMICE"
-
+
     if 'location_type' in station_config.keys():
         ds.attrs['location_type'] = station_config['location_type']
     else:
         logger.error('No project info in station_config. Using \"ice sheet\".')
         ds.attrs['location_type'] = "ice sheet"
-
+
     return ds
 
 
 def process_surface_height(ds, data_adjustments_dir, station_config={}):
     """
-    Process surface height data for different site types and create
+    Process surface height data for different site types and create
     surface height variables.
 
     Parameters
     ----------
     ds : xarray.Dataset
         The dataset containing various measurements and attributes including
-        'site_type' which determines the type of site (e.g., 'ablation',
-        'accumulation', 'bedrock') and other relevant data variables such as
+        'site_type' which determines the type of site (e.g., 'ablation',
+        'accumulation', 'bedrock') and other relevant data variables such as
         'z_boom_u', 'z_stake', 'z_pt_cor', etc.
 
     Returns
@@ -164,18 +164,18 @@ def process_surface_height(ds, data_adjustments_dir, station_config={}):
         if ds.z_stake.notnull().any():
             first_valid_index = ds.time.where((ds.z_stake + ds.z_boom_u).notnull(), drop=True).data[0]
             ds['z_surf_2'] = ds.z_surf_1.sel(time=first_valid_index) + ds.z_stake.sel(time=first_valid_index) - ds['z_stake']
-
+
         # Use corrected point data if available
         if 'z_pt_cor' in ds.data_vars:
             ds['z_ice_surf'] = ('time', ds['z_pt_cor'].data)
-
+
     else:
         # Calculate surface heights for other site types
         first_valid_index = ds.time.where(ds.z_boom_u.notnull(), drop=True).data[0]
         ds['z_surf_1'] = ds.z_boom_u.sel(time=first_valid_index) - ds['z_boom_u']
         if 'z_stake' in ds.data_vars and ds.z_stake.notnull().any():
             first_valid_index = ds.time.where(ds.z_stake.notnull(), drop=True).data[0]
-            ds['z_surf_2'] = ds.z_stake.sel(time=first_valid_index) - ds['z_stake']
+            ds['z_surf_2'] = ds.z_stake.sel(time=first_valid_index) - ds['z_stake']
         if 'z_boom_l' in ds.data_vars:
             # need a combine first because KAN_U switches from having a z_stake
             # to having a z_boom_l
@@ -191,7 +191,7 @@ def process_surface_height(ds, data_adjustments_dir, station_config={}):
 
     (ds['z_surf_combined'], ds['z_ice_surf'],
      ds['z_surf_1_adj'], ds['z_surf_2_adj']) = combine_surface_height(df_in, ds.attrs['site_type'])
-
+
 
     if ds.attrs['site_type'] == 'ablation':
         # Calculate rolling minimum for ice surface height and snow height
@@ -217,22 +217,22 @@ def process_surface_height(ds, data_adjustments_dir, station_config={}):
         z_ice_surf = (ts_interpolated
                       .rolling('1D', center=True, min_periods=1)
                       .median())
-
+
         z_ice_surf = z_ice_surf.loc[ds.time]
         # here we make sure that the periods where both z_stake and z_pt are
         # missing are also missing in z_ice_surf
         msk = ds['z_ice_surf'].notnull() | ds['z_surf_2_adj'].notnull()
-        z_ice_surf = z_ice_surf.where(msk)
-
+        z_ice_surf = z_ice_surf.where(msk)
+
         # taking running minimum to get ice
         z_ice_surf = z_ice_surf.cummin()
 
         # filling gaps only if they are less than a year long and if values on both
         # sides are less than 0.01 m appart
-
+
         # Forward and backward fill to identify bounds of gaps
         df_filled = z_ice_surf.fillna(method='ffill').fillna(method='bfill')
-
+
         # Identify gaps and their start and end dates
         gaps = pd.DataFrame(index=z_ice_surf[z_ice_surf.isna()].index)
         gaps['prev_value'] = df_filled.shift(1)
@@ -241,17 +241,17 @@ def process_surface_height(ds, data_adjustments_dir, station_config={}):
         gaps['gap_end'] = gaps.index.to_series().shift(-1)
         gaps['gap_duration'] = (gaps['gap_end'] - gaps['gap_start']).dt.days
         gaps['value_diff'] = (gaps['next_value'] - gaps['prev_value']).abs()
-
+
         # Determine which gaps to fill
         mask = (gaps['gap_duration'] < 365) & (gaps['value_diff'] < 0.01)
         gaps_to_fill = gaps[mask].index
-
+
         # Fill gaps in the original Series
         z_ice_surf.loc[gaps_to_fill] = df_filled.loc[gaps_to_fill]
-
+
         # bringing the variable into the dataset
         ds['z_ice_surf'] = z_ice_surf
-
+
         ds['z_surf_combined'] = np.maximum(ds['z_surf_combined'], ds['z_ice_surf'])
         ds['snow_height'] = np.maximum(0, ds['z_surf_combined'] - ds['z_ice_surf'])
         ds['z_ice_surf'] = ds['z_ice_surf'].where(ds.snow_height.notnull())
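The gap-filling rule in this hunk only fills a gap when it is shorter than 365 days and the values on its two sides differ by less than 0.01 m. A standalone sketch of that test on toy data, with the prev/next bookkeeping simplified and the duration check omitted:

    import numpy as np
    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=10, freq="D")
    s = pd.Series([1.0, 1.0, np.nan, np.nan, 1.005, 1.005, np.nan, 2.0, 2.0, 2.0], index=idx)

    filled = s.ffill().bfill()                    # candidate fill values
    prev_val = s.ffill().shift(1)                 # last value before each gap
    next_val = s.bfill().shift(-1)                # first value after each gap
    ok = s.isna() & ((next_val - prev_val).abs() < 0.01)
    s[ok] = filled[ok]                            # the 5 mm gap is filled; the 1 m jump is not
    print(s.values)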
@@ -274,43 +274,43 @@ def process_surface_height(ds, data_adjustments_dir, station_config={}):
                                         station_config)
         for var in df_out.columns:
             ds[var] = ('time', df_out[var].values)
-
+
     return ds
 
 def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
     '''Combines the data from three sensor: the two sonic rangers and the
     pressure transducer, to recreate the surface height, the ice surface height
     and the snow depth through the years. For the accumulation sites, it is
-    only the average of the two sonic rangers (after manual adjustments to
-    correct maintenance shifts). For the ablation sites, first an ablation
+    only the average of the two sonic rangers (after manual adjustments to
+    correct maintenance shifts). For the ablation sites, first an ablation
     period is estimated each year (either the period when z_pt_cor decreases
-    or JJA if no better estimate) then different adjustmnents are conducted
+    or JJA if no better estimate) then different adjustmnents are conducted
     to stitch the three time series together: z_ice_surface (adjusted from
     z_pt_cor) or if unvailable, z_surf_2 (adjusted from z_stake)
     are used in the ablation period while an average of z_surf_1 and z_surf_2
-    are used otherwise, after they are being adjusted to z_ice_surf at the end
+    are used otherwise, after they are being adjusted to z_ice_surf at the end
     of the ablation season.
-
+
     Parameters
     ----------
     df : pandas.dataframe
         Dataframe with datetime index and variables z_surf_1, z_surf_2 and z_ice_surf
-    site_type : str
+    site_type : str
         Either 'accumulation' or 'ablation'
     threshold_ablation : float
         Threshold to which a z_pt_cor hourly decrease is compared. If the decrease
         is higher, then there is ablation.
     '''
     logger.info('Combining surface height')
-
+
     if 'z_surf_2' not in df.columns:
         logger.info('-> did not find z_surf_2')
         df["z_surf_2"] = df["z_surf_1"].values*np.nan
-
+
     if 'z_ice_surf' not in df.columns:
         logger.info('-> did not find z_ice_surf')
         df["z_ice_surf"] = df["z_surf_1"].values*np.nan
-
+
     if site_type in ['accumulation', 'bedrock']:
         logger.info('-> no z_pt or accumulation site: averaging z_surf_1 and z_surf_2')
         df["z_surf_1_adj"] = hampel(df["z_surf_1"].interpolate(limit=72)).values
@@ -326,7 +326,7 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
         df['z_surf_combined'] = df.z_surf_2_adj.values
 
         # df["z_surf_combined"] = hampel(df["z_surf_combined"].interpolate(limit=72)).values
-        return (df['z_surf_combined'], df["z_surf_combined"]*np.nan,
+        return (df['z_surf_combined'], df["z_surf_combined"]*np.nan,
                 df["z_surf_1_adj"], df["z_surf_2_adj"])
 
     else:
@@ -340,7 +340,7 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
         df["z_surf_2_adj"] = hampel(df["z_surf_2"].interpolate(limit=72), k=24, t0=5).values
 
         # defining ice ablation period from the decrease of a smoothed version of z_pt
-        # meaning when smoothed_z_pt.diff() < threshold_ablation
+        # meaning when smoothed_z_pt.diff() < threshold_ablation
         # first smoothing
         smoothed_PT = (df['z_ice_surf']
                        .resample('h')
@@ -354,14 +354,14 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
         # smoothed_PT.loc[df.z_ice_surf.isnull()] = np.nan
 
         # logical index where ablation is detected
-        ind_ablation = np.logical_and(smoothed_PT.diff().values < threshold_ablation,
+        ind_ablation = np.logical_and(smoothed_PT.diff().values < threshold_ablation,
                                       np.isin(smoothed_PT.diff().index.month, [6, 7, 8, 9]))
 
 
         # finding the beginning and end of each period with True
         idx = np.argwhere(np.diff(np.r_[False,ind_ablation, False])).reshape(-1, 2)
         idx[:, 1] -= 1
-
+
         # fill small gaps in the ice ablation periods.
         for i in range(len(idx)-1):
             ind = idx[i]
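The idiom np.argwhere(np.diff(np.r_[False, ind_ablation, False])).reshape(-1, 2) used above converts a boolean mask into (start, end) index pairs, one per run of consecutive True values. A minimal demonstration:

    import numpy as np

    ind = np.array([False, True, True, False, False, True, True, True, False])
    edges = np.diff(np.r_[False, ind, False])  # True where the mask flips
    idx = np.argwhere(edges).reshape(-1, 2)    # one (start, one-past-end) pair per run
    idx[:, 1] -= 1                             # make the end index inclusive, as above
    print(idx)                                 # [[1 2] [5 7]]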
@@ -371,20 +371,20 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
             # season
             if df.index[ind_next[0]]-df.index[ind[1]]<pd.to_timedelta('60 days'):
                 ind_ablation[ind[1]:ind_next[0]]=True
-
+
         # finding the beginning and end of each period with True
         idx = np.argwhere(np.diff(np.r_[False,ind_ablation, False])).reshape(-1, 2)
         idx[:, 1] -= 1
-
-        # because the smooth_PT sees 7 days ahead, it starts showing a decline
-        # 7 days in advance, we therefore need to exclude the first 7 days of
+
+        # because the smooth_PT sees 7 days ahead, it starts showing a decline
+        # 7 days in advance, we therefore need to exclude the first 7 days of
         # each ablation period
         for start, end in idx:
             period_start = df.index[start]
             period_end = period_start + pd.Timedelta(days=7)
             exclusion_period = (df.index >= period_start) & (df.index < period_end)
             ind_ablation[exclusion_period] = False
-
+
         hs1=df["z_surf_1_adj"].interpolate(limit=24*2).copy()
         hs2=df["z_surf_2_adj"].interpolate(limit=24*2).copy()
         z=df["z_ice_surf_adj"].interpolate(limit=24*2).copy()
@@ -397,9 +397,9 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
 
         if any(~np.isnan(hs2.iloc[:24*7])) & any(~np.isnan(hs1.iloc[:24*7])):
             hs2 = hs2 + hs1.iloc[:24*7].mean() - hs2.iloc[:24*7].mean()
-
+
         if any(~np.isnan(z.iloc[:24*7])):
-            # expressing ice surface height relative to its mean value in the
+            # expressing ice surface height relative to its mean value in the
             # first week of the record
             z = z - z.iloc[:24*7].mean()
         elif z.notnull().any():
@@ -414,16 +414,16 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                 z.first_valid_index():(z.first_valid_index()+pd.to_timedelta('14D'))
             ].mean() + hs1.iloc[:24*7].mean()
         else:
-            # if there is more than a year (actually 251 days) between the
+            # if there is more than a year (actually 251 days) between the
             # initiation of the AWS and the installation of the pressure transducer
             # we remove the intercept in the pressure transducer data.
-            # Removing the intercept
+            # Removing the intercept
             # means that we consider the ice surface height at 0 when the AWS
             # is installed, and not when the pressure transducer is installed.
             Y = z.iloc[:].values.reshape(-1, 1)
-            X = z.iloc[~np.isnan(Y)].index.astype(np.int64).values.reshape(-1, 1)
-            Y = Y[~np.isnan(Y)]
-            linear_regressor = LinearRegression()
+            X = z.iloc[~np.isnan(Y)].index.astype(np.int64).values.reshape(-1, 1)
+            Y = Y[~np.isnan(Y)]
+            linear_regressor = LinearRegression()
             linear_regressor.fit(X, Y)
             Y_pred = linear_regressor.predict(z.index.astype(np.int64).values.reshape(-1, 1) )
             z = z-Y_pred[0]
@@ -444,7 +444,7 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                 ind_abl_yr = np.logical_and(ind_yr, df.index.month.isin([6,7,8]))
                 ind_ablation[ind_yr] = ind_abl_yr[ind_yr]
                 logger.debug(str(y)+' no z_ice_surf, just using JJA')
-
+
             else:
                 logger.debug(str(y)+ ' derived from z_ice_surf')
 
@@ -494,7 +494,7 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
 
             if all(np.isnan(z_jja)) and any(~np.isnan(hs2_jja)):
                 # if there is no PT for a given year, but there is some hs2
-                # then z will be adjusted to hs2 next time it is available
+                # then z will be adjusted to hs2 next time it is available
                 hs2_ref = 1
 
             if all(np.isnan(z_winter)) and all(np.isnan(hs2_winter)):
@@ -518,7 +518,7 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
 
                 # in some instances, the PT data is available but no ablation
                 # is recorded, then hs2 remains the reference during that time.
-                # When eventually there is ablation, then we need to find the
+                # When eventually there is ablation, then we need to find the
                 # first index in these preceding ablation-free years
                 # the shift will be applied back from this point
                 # first_index = z[:z[str(y)].first_valid_index()].isnull().iloc[::-1].idxmax()
@@ -538,13 +538,13 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                     else:
                         logger.debug('adjusting z to hs1')
                         first_index = hs2.iloc[ind_start[i]:].first_valid_index()
-                        z[first_index:] = z[first_index:] - z[first_index] + hs2[first_index]
+                        z[first_index:] = z[first_index:] - z[first_index] + hs2[first_index]
                 else:
-                    logger.debug('adjusting z to hs1')
-                    z[first_index:] = z[first_index:] - z[first_index] + hs2[first_index]
+                    logger.debug('adjusting z to hs1')
+                    z[first_index:] = z[first_index:] - z[first_index] + hs2[first_index]
                 hs2_ref = 0 # from now on PT is the reference
-
-
+
+
             else:
                 # if z_pt is the reference and there is some ablation
                 # then hs1 and hs2 are adjusted to z_pt
@@ -560,7 +560,7 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                         [first_index,
                          hs2_year.first_valid_index()]))
 
-                # if PT, hs1 and hs2 are all nan until station is reactivated, then
+                # if PT, hs1 and hs2 are all nan until station is reactivated, then
                 first_day_of_year = pd.to_datetime(str(y)+'-01-01')
 
                 if len(z[first_day_of_year:first_index-pd.to_timedelta('1D')])>0:
@@ -568,8 +568,8 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                         hs1[first_day_of_year:first_index-pd.to_timedelta('1D')].isnull().all() & \
                         hs2[first_day_of_year:first_index-pd.to_timedelta('1D')].isnull().all():
                         if (~np.isnan(np.nanmean(z[first_index:first_index+pd.to_timedelta('1D')])) \
-                            and ~np.isnan(np.nanmean(hs2[first_index:first_index+pd.to_timedelta('1D')]))):
-                            logger.debug(' ======= adjusting hs1 and hs2 to z_pt')
+                            and ~np.isnan(np.nanmean(hs2[first_index:first_index+pd.to_timedelta('1D')]))):
+                            logger.debug(' ======= adjusting hs1 and hs2 to z_pt')
                             if ~np.isnan(np.nanmean(hs1[first_index:first_index+pd.to_timedelta('1D')]) ):
                                 hs1[first_index:] = hs1[first_index:] \
                                     - np.nanmean(hs1[first_index:first_index+pd.to_timedelta('1D')]) \
@@ -580,23 +580,23 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                                     + np.nanmean(z[first_index:first_index+pd.to_timedelta('1D')])
 
                 # adjustment taking place at the end of the ablation period
-                if (ind_end[i] != -999):
+                if (ind_end[i] != -999):
                     # if y == 2023:
                     #     import pdb; pdb.set_trace()
                     # if there's ablation and
                     # if there are PT data available at the end of the melt season
                     if z.iloc[(ind_end[i]-24*7):(ind_end[i]+24*7)].notnull().any():
                         logger.debug('adjusting hs2 to z')
-                        # then we adjust hs2 to the end-of-ablation z
+                        # then we adjust hs2 to the end-of-ablation z
                         # first trying at the end of melt season
-                        if ~np.isnan(np.nanmean(hs2.iloc[(ind_end[i]-24*7):(ind_end[i]+24*30)])):
+                        if ~np.isnan(np.nanmean(hs2.iloc[(ind_end[i]-24*7):(ind_end[i]+24*30)])):
                             logger.debug('using end of melt season')
                             hs2.iloc[ind_end[i]:] = hs2.iloc[ind_end[i]:] - \
                                 np.nanmean(hs2.iloc[(ind_end[i]-24*7):(ind_end[i]+24*30)]) + \
                                 np.nanmean(z.iloc[(ind_end[i]-24*7):(ind_end[i]+24*30)])
                         # if not possible, then trying the end of the following accumulation season
                         elif (i+1 < len(ind_start)):
-                            if ind_start[i+1]!=-999 and any(~np.isnan(hs2.iloc[(ind_start[i+1]-24*7):(ind_start[i+1]+24*7)]+ z.iloc[(ind_start[i+1]-24*7):(ind_start[i+1]+24*7)])):
+                            if ind_start[i+1]!=-999 and any(~np.isnan(hs2.iloc[(ind_start[i+1]-24*7):(ind_start[i+1]+24*7)]+ z.iloc[(ind_start[i+1]-24*7):(ind_start[i+1]+24*7)])):
                                 logger.debug('using end of accumulation season')
                                 hs2.iloc[ind_end[i]:] = hs2.iloc[ind_end[i]:] - \
                                     np.nanmean(hs2.iloc[(ind_start[i+1]-24*7):(ind_start[i+1]+24*7)]) + \
@@ -614,7 +614,7 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                     if any(~np.isnan(hs1_following_winter)):
                         logger.debug('to hs1')
                         # then we adjust hs1 to hs2 during the accumulation area
-                        # adjustment is done so that the mean hs1 and mean hs2 match
+                        # adjustment is done so that the mean hs1 and mean hs2 match
                         # for the period when both are available
                         hs2_following_winter[np.isnan(hs1_following_winter)] = np.nan
                         hs1_following_winter[np.isnan(hs2_following_winter)] = np.nan
@@ -628,12 +628,12 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                 hs2_following_winter = hs2[str(y)+'-09-01':str(y+1)+'-03-01'].copy()
                 # adjusting hs1 to hs2 (no ablation case)
                 if any(~np.isnan(hs1_following_winter)):
-                    logger.debug('adjusting hs1')
+                    logger.debug('adjusting hs1')
                     # and if there are some hs2 during the accumulation period
                     if any(~np.isnan(hs2_following_winter)):
                         logger.debug('to hs2')
                         # then we adjust hs1 to hs2 during the accumulation area
-                        # adjustment is done so that the mean hs1 and mean hs2 match
+                        # adjustment is done so that the mean hs1 and mean hs2 match
                         # for the period when both are available
                         hs1_following_winter[np.isnan(hs2_following_winter)] = np.nan
                         hs2_following_winter[np.isnan(hs1_following_winter)] = np.nan
@@ -652,7 +652,7 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                     if any(~np.isnan(hs2_following_winter)):
                         logger.debug('to hs2, minimizing winter difference')
                         # then we adjust hs1 to hs2 during the accumulation area
-                        # adjustment is done so that the mean hs1 and mean hs2 match
+                        # adjustment is done so that the mean hs1 and mean hs2 match
                         # for the period when both are available
                         tmp1 = hs1.iloc[ind_end[i]:min(len(hs1),ind_end[i]+24*30*9)].copy()
                         tmp2 = hs2.iloc[ind_end[i]:min(len(hs2),ind_end[i]+24*30*9)].copy()
@@ -670,15 +670,15 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                 # if no hs2, then use PT data available at the end of the melt season
                 elif np.any(~np.isnan(z.iloc[(ind_end[i]-24*14):(ind_end[i]+24*7)])):
                     logger.debug('to z')
-                    # then we adjust hs2 to the end-of-ablation z
+                    # then we adjust hs2 to the end-of-ablation z
                     # first trying at the end of melt season
-                    if ~np.isnan(np.nanmean(hs1.iloc[(ind_end[i]-24*14):(ind_end[i]+24*30)])):
+                    if ~np.isnan(np.nanmean(hs1.iloc[(ind_end[i]-24*14):(ind_end[i]+24*30)])):
                         logger.debug('using end of melt season')
                         hs1.iloc[ind_end[i]:] = hs1.iloc[ind_end[i]:] - \
                             np.nanmean(hs1.iloc[(ind_end[i]-24*14):(ind_end[i]+24*30)]) + \
                             np.nanmean(z.iloc[(ind_end[i]-24*14):(ind_end[i]+24*30)])
                     # if not possible, then trying the end of the following accumulation season
-                    elif ind_start[i+1]!=-999 and any(~np.isnan(hs1.iloc[(ind_start[i+1]-24*14):(ind_start[i+1]+24*7)]+ z.iloc[(ind_start[i+1]-24*14):(ind_start[i+1]+24*7)])):
+                    elif ind_start[i+1]!=-999 and any(~np.isnan(hs1.iloc[(ind_start[i+1]-24*14):(ind_start[i+1]+24*7)]+ z.iloc[(ind_start[i+1]-24*14):(ind_start[i+1]+24*7)])):
                         logger.debug('using end of accumulation season')
                         hs1.iloc[ind_end[i]:] = hs1.iloc[ind_end[i]:] - \
                             np.nanmean(hs1.iloc[(ind_start[i+1]-24*14):(ind_start[i+1]+24*7)]) + \
@@ -686,7 +686,7 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
                 elif any(~np.isnan(hs2_year)):
                     logger.debug('to the last value of hs2')
                     # then we adjust hs1 to hs2 during the accumulation area
-                    # adjustment is done so that the mean hs1 and mean hs2 match
+                    # adjustment is done so that the mean hs1 and mean hs2 match
                     # for the period when both are available
                     half_span = pd.to_timedelta('7D')
                     tmp1 = hs1_year.loc[(hs2_year.last_valid_index()-half_span):(hs2_year.last_valid_index()+half_span)].copy()
@@ -702,25 +702,25 @@ def combine_surface_height(df, site_type, threshold_ablation = -0.0002):
         df["z_surf_combined"] = np.nan
 
         # in winter, both SR1 and SR2 are used
-        df["z_surf_combined"] = df["z_surf_2_adj"].interpolate(limit=72).values
-
+        df["z_surf_combined"] = df["z_surf_2_adj"].interpolate(limit=72).values
+
 
         # in ablation season we use SR2 instead of the SR1&2 average
         # here two options:
         # 1) we ignore the SR1 and only use SR2
-        # 2) we use SR1 when SR2 is not available (commented)
+        # 2) we use SR1 when SR2 is not available (commented)
         # the later one can cause jumps when SR2 starts to be available few days after SR1
         data_update = df[["z_surf_1_adj", "z_surf_2_adj"]].mean(axis=1).values
-
-        ind_update = ~ind_ablation
+
+        ind_update = ~ind_ablation
         #ind_update = np.logical_and(ind_ablation, ~np.isnan(data_update))
-        df.loc[ind_update,"z_surf_combined"] = data_update[ind_update]
+        df.loc[ind_update,"z_surf_combined"] = data_update[ind_update]
 
         # in ablation season we use pressure transducer over all other options
-        data_update = df[ "z_ice_surf_adj"].interpolate(limit=72).values
+        data_update = df[ "z_ice_surf_adj"].interpolate(limit=72).values
         ind_update = np.logical_and(ind_ablation, ~np.isnan(data_update))
-        df.loc[ind_update,"z_surf_combined"] = data_update[ind_update]
-
+        df.loc[ind_update,"z_surf_combined"] = data_update[ind_update]
+
         logger.info('surface height combination finished')
         return df['z_surf_combined'], df["z_ice_surf_adj"], df["z_surf_1_adj"], df["z_surf_2_adj"]
 
@@ -730,7 +730,7 @@ def hampel(vals_orig, k=7*24, t0=15):
     k: size of window (including the sample; 7 is equal to 3 on either side of value)
     '''
     #Make copy so original not edited
-    vals=vals_orig.copy()
+    vals=vals_orig.copy()
     #Hampel Filter
     L= 1.4826
     rolling_median=vals.rolling(k).median()
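For reference, the Hampel filter whose body is split across this and the following hunk flags points that sit more than t0 scaled median-absolute-deviations away from a rolling median and replaces them with that median; 1.4826 converts the MAD into a standard-deviation estimate under a Gaussian assumption. A self-contained sketch consistent with the signature hampel(vals_orig, k=7*24, t0=15), not a verbatim copy:

    import numpy as np
    import pandas as pd

    def hampel_sketch(vals_orig, k=7 * 24, t0=15):
        vals = vals_orig.copy()                    # keep the caller's series intact
        L = 1.4826                                 # MAD -> sigma for Gaussian data
        med = vals.rolling(k).median()
        diff = (vals - med).abs()
        mad = diff.rolling(k).median()
        outlier = diff > t0 * L * mad
        vals[outlier] = med[outlier]               # replace outliers with the rolling median
        return vals

    s = pd.Series(np.sin(np.linspace(0, 10, 500)))
    s.iloc[400] = 50.0                             # inject a spike
    print(hampel_sketch(s, k=48, t0=5).iloc[400])  # pulled back near the local median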
@@ -744,19 +744,19 @@ def hampel(vals_orig, k=7*24, t0=15):
 
 
 def get_thermistor_depth(df_in, site, station_config):
-    '''Calculates the depth of the thermistors through time based on their
+    '''Calculates the depth of the thermistors through time based on their
     installation depth (collected in a google sheet) and on the change of surface
     height: instruments getting buried under new snow or surfacing due to ablation.
     There is a potential for additional filtering of thermistor data for surfaced
     (or just noisy) thermistors, but that is currently deactivated because slow.
-
+
     Parameters
     ----------
     df_in : pandas:dataframe
-        dataframe containing the ice/firn temperature t_i_* as well as the
+        dataframe containing the ice/firn temperature t_i_* as well as the
         combined surface height z_surf_combined
     site : str
-        stid, so that maintenance date and sensor installation depths can be found
+        stid, so that maintenance date and sensor installation depths can be found
         in database
     station_config : dict
         potentially containing the key string_maintenance
@@ -768,17 +768,17 @@ def get_thermistor_depth(df_in, site, station_config):
         # Add more entries as needed
     ]
     '''
-
+
     temp_cols_name = ['t_i_'+str(i) for i in range(12) if 't_i_'+str(i) in df_in.columns]
     num_therm = len(temp_cols_name)
-    depth_cols_name = ['d_t_i_'+str(i) for i in range(1,num_therm+1)]
-
+    depth_cols_name = ['d_t_i_'+str(i) for i in range(1,num_therm+1)]
+
     if df_in['z_surf_combined'].isnull().all():
         logger.info('No valid surface height at '+site+', cannot calculate thermistor depth')
         df_in[depth_cols_name + ['t_i_10m']] = np.nan
     else:
         logger.info('Calculating thermistor depth')
-
+
         # Convert maintenance_info to DataFrame for easier manipulation
         maintenance_string = pd.DataFrame(
             station_config.get("string_maintenance",[]),
@@ -786,14 +786,14 @@ def get_thermistor_depth(df_in, site, station_config):
         )
         maintenance_string["date"] = pd.to_datetime(maintenance_string["date"])
         maintenance_string = maintenance_string.sort_values(by='date', ascending=True)
-
+
 
         if num_therm == 8:
             ini_depth = [1, 2, 3, 4, 5, 6, 7, 10]
         else:
             ini_depth = [0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5]
         df_in[depth_cols_name] = np.nan
-
+
         # filtering the surface height
         surface_height = df_in["z_surf_combined"].copy()
         ind_filter = surface_height.rolling(window=14, center=True).var() > 0.1
@@ -801,7 +801,7 @@ def get_thermistor_depth(df_in, site, station_config):
             surface_height[ind_filter] = np.nan
         df_in["z_surf_combined"] = surface_height.values
         z_surf_interp = df_in["z_surf_combined"].interpolate()
-
+
         # first initialization of the depths
         for i, col in enumerate(depth_cols_name):
             df_in[col] = (
@@ -809,18 +809,18 @@ def get_thermistor_depth(df_in, site, station_config):
                 + z_surf_interp.values
                 - z_surf_interp[z_surf_interp.first_valid_index()]
             )
-
+
         # reseting depth at maintenance
         if len(maintenance_string.date) == 0:
             logger.info("No maintenance at "+site)
-
+
         for date in maintenance_string.date:
             if date > z_surf_interp.last_valid_index():
                 continue
             new_depth = maintenance_string.loc[
                 maintenance_string.date == date
             ].installation_depths.values[0]
-
+
             for i, col in enumerate(depth_cols_name[:len(new_depth)]):
                 tmp = df_in[col].copy()
                 tmp.loc[date:] = (
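The depth bookkeeping above reduces to depth(t) = installation_depth + (z_surf(t) - z_surf(t_install)), re-based whenever a maintenance visit re-installs the string at a new depth. A toy illustration of that arithmetic; the depths and surface heights are invented:

    import pandas as pd

    time = pd.date_range("2021-01-01", periods=6, freq="MS")
    z_surf = pd.Series([0.0, 0.2, 0.5, 0.5, 0.1, 0.3], index=time)  # surface height (m)

    ini_depth = 1.0                              # sensor installed 1 m below the surface
    depth = ini_depth + z_surf - z_surf.iloc[0]  # burial grows as the surface rises

    maintenance_date = time[4]                   # string re-installed at a new depth
    new_depth = 2.0
    depth.loc[maintenance_date:] = (new_depth
                                    + z_surf.loc[maintenance_date:]
                                    - z_surf.loc[maintenance_date])
    print(depth.values)                          # [1.  1.2 1.5 1.5 2.  2.2]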
@@ -831,11 +831,11 @@ def get_thermistor_depth(df_in, site, station_config):
                     ]
                 )
                 df_in[col] = tmp.values
-
+
         # % Filtering thermistor data
         for i in range(len(temp_cols_name)):
             tmp = df_in[temp_cols_name[i]].copy()
-
+
             # variance filter
             # ind_filter = (
             #     df_in[temp_cols_name[i]]
@@ -850,7 +850,7 @@ def get_thermistor_depth(df_in, site, station_config):
             #     ind_filter.loc[np.isin(month, [5, 6, 7])] = False
             # if any(ind_filter):
             #     tmp.loc[ind_filter] = np.nan
-
+
             # before and after maintenance adaptation filter
             if len(maintenance_string.date) > 0:
                 for date in maintenance_string.date:
@@ -866,7 +866,7 @@ def get_thermistor_depth(df_in, site, station_config):
                     ) < np.timedelta64(7, "D")
                     if any(ind_adapt):
                         tmp.loc[ind_adapt] = np.nan
-
+
             # surfaced thermistor
             ind_pos = df_in[depth_cols_name[i]] < 0.1
             if any(ind_pos):
@@ -874,7 +874,7 @@ def get_thermistor_depth(df_in, site, station_config):
 
             # copying the filtered values to the original table
             df_in[temp_cols_name[i]] = tmp.values
-
+
         # removing negative depth
         df_in.loc[df_in[depth_cols_name[i]]<0, depth_cols_name[i]] = np.nan
         logger.info("interpolating 10 m firn/ice temperature")
@@ -885,7 +885,7 @@ def get_thermistor_depth(df_in, site, station_config):
             kind="linear",
             min_diff_to_depth=1.5,
         ).set_index('date').values
-
+
         # filtering
         ind_pos = df_in["t_i_10m"] > 0.1
         ind_low = df_in["t_i_10m"] < -70
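The interpolate_temperature call above derives t_i_10m by interpolating each time step's temperature profile to 10 m depth with kind="linear"; min_diff_to_depth appears to bound how far the nearest sensor may sit from the target depth. A sketch of the per-timestep interpolation with scipy, omitting that guard; the profile values are invented:

    import numpy as np
    from scipy.interpolate import interp1d

    depths = np.array([2.0, 4.0, 6.0, 8.0, 12.0])          # thermistor depths (m)
    temps = np.array([-12.0, -13.0, -13.5, -14.0, -14.2])  # temperatures (deg C)

    f = interp1d(depths, temps, kind="linear")
    print(float(f(10.0)))                                  # -14.1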
@@ -897,12 +897,12 @@ def get_thermistor_depth(df_in, site, station_config):
 
 def interpolate_temperature(dates, depth_cor, temp, depth=10, min_diff_to_depth=2,
                             kind="quadratic"):
-    '''Calculates the depth of the thermistors through time based on their
+    '''Calculates the depth of the thermistors through time based on their
     installation depth (collected in a google sheet) and on the change of surface
     height: instruments getting buried under new snow or surfacing due to ablation.
     There is a potential for additional filtering of thermistor data for surfaced
     (or just noisy) thermistors, but that is currently deactivated because slow.
-
+
     Parameters
     ----------
     dates : numpy.array
@@ -959,20 +959,20 @@ def gps_coordinate_postprocessing(ds, var, station_config={}):
         static_value = float(ds.attrs[coord_names[var_out]])
     else:
         static_value = np.nan
-
+
     # if there is no gps observations, then we use the static value repeated
     # for each time stamp
-    if var not in ds.data_vars:
+    if var not in ds.data_vars:
         print('no',var,'at', ds.attrs['station_id'])
         return np.ones_like(ds['t_u'].data)*static_value
-
+
     if ds[var].isnull().all():
         print('no',var,'at',ds.attrs['station_id'])
         return np.ones_like(ds['t_u'].data)*static_value
-
+
     # Extract station relocations from the config dict
     station_relocations = station_config.get("station_relocation", [])
-
+
     # Convert the ISO8601 strings to pandas datetime objects
     breaks = [pd.to_datetime(date_str) for date_str in station_relocations]
     if len(breaks)==0:
@@ -981,16 +981,16 @@ def gps_coordinate_postprocessing(ds, var, station_config={}):
         logger.info('processing '+var+' with relocation on ' + ', '.join([br.strftime('%Y-%m-%dT%H:%M:%S') for br in breaks]))
 
     return piecewise_smoothing_and_interpolation(ds[var].to_series(), breaks)
-
+
 def piecewise_smoothing_and_interpolation(data_series, breaks):
-    '''Smoothes, inter- or extrapolate the GPS observations. The processing is
-    done piecewise so that each period between station relocations are done
-    separately (no smoothing of the jump due to relocation). Piecewise linear
-    regression is then used to smooth the available observations. Then this
-    smoothed curve is interpolated linearly over internal gaps. Eventually, this
-    interpolated curve is extrapolated linearly for timestamps before the first
+    '''Smoothes, inter- or extrapolate the GPS observations. The processing is
+    done piecewise so that each period between station relocations are done
+    separately (no smoothing of the jump due to relocation). Piecewise linear
+    regression is then used to smooth the available observations. Then this
+    smoothed curve is interpolated linearly over internal gaps. Eventually, this
+    interpolated curve is extrapolated linearly for timestamps before the first
     valid measurement and after the last valid measurement.
-
+
     Parameters
     ----------
     data_series : pandas.Series
@@ -998,7 +998,7 @@ def piecewise_smoothing_and_interpolation(data_series, breaks):
     breaks: list
         List of timestamps of station relocation. First and last item should be
         None so that they can be used in slice(breaks[i], breaks[i+1])
-
+
     Returns
     -------
     np.ndarray
@@ -1009,44 +1009,44 @@ def piecewise_smoothing_and_interpolation(data_series, breaks):
     _inferred_series = []
     for i in range(len(breaks) - 1):
         df = data_series.loc[slice(breaks[i], breaks[i+1])]
-
+
         # Drop NaN values and calculate the number of segments based on valid data
         df_valid = df.dropna()
-        if df_valid.shape[0] > 2:
+        if df_valid.shape[0] > 2:
             # Fit linear regression model to the valid data range
             x = pd.to_numeric(df_valid.index).values.reshape(-1, 1)
             y = df_valid.values.reshape(-1, 1)
-
+
             model = LinearRegression()
             model.fit(x, y)
-
+
             # Predict using the model for the entire segment range
             x_pred = pd.to_numeric(df.index).values.reshape(-1, 1)
-
+
             y_pred = model.predict(x_pred)
             df = pd.Series(y_pred.flatten(), index=df.index)
         # adds to list the predicted values for the current segment
         _inferred_series.append(df)
-
+
     df_all = pd.concat(_inferred_series)
-
+
     # Fill internal gaps with linear interpolation
     df_all = df_all.interpolate(method='linear', limit_area='inside')
-
+
     # Remove duplicate indices and return values as numpy array
     df_all = df_all[~df_all.index.duplicated(keep='last')]
     return df_all.values
-
-def calculate_tubulent_heat_fluxes(T_0, T_h, Tsurf_h, WS_h, z_WS, z_T, q_h, p_h,
-                                   kappa=0.4, WS_lim=1., z_0=0.001, g=9.82, es_0=6.1071, eps=0.622,
-                                   gamma=16., L_sub=2.83e6, L_dif_max=0.01, c_pd=1005., aa=0.7,
-                                   bb=0.75, cc=5., dd=0.35, R_d=287.05):
-    '''Calculate latent and sensible heat flux using the bulk calculation
-    method
-
+
+def calculate_tubulent_heat_fluxes(T_0, T_h, Tsurf_h, WS_h, z_WS, z_T, q_h, p_h,
+                                   kappa=0.4, WS_lim=1., z_0=0.001, g=9.82, es_0=6.1071, eps=0.622,
+                                   gamma=16., L_sub=2.83e6, L_dif_max=0.01, c_pd=1005., aa=0.7,
+                                   bb=0.75, cc=5., dd=0.35, R_d=287.05):
+    '''Calculate latent and sensible heat flux using the bulk calculation
+    method
+
     Parameters
     ----------
-    T_0 : int
+    T_0 : int
         Freezing point temperature
     T_h : xarray.DataArray
         Air temperature
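piecewise_smoothing_and_interpolation, whose body appears at the top of this hunk, fits one linear regression per inter-relocation segment, so the coordinate jump at a relocation is preserved rather than smoothed. A toy run with one artificial relocation; the coordinates are invented:

    import numpy as np
    import pandas as pd
    from sklearn.linear_model import LinearRegression

    time = pd.date_range("2022-01-01", periods=10, freq="D")
    lat = pd.Series(np.r_[np.linspace(66.0, 66.001, 5),
                          np.linspace(66.5, 66.501, 5)], index=time)
    breaks = [None, pd.Timestamp("2022-01-05 12:00"), None]  # one relocation

    pieces = []
    for i in range(len(breaks) - 1):
        seg = lat.loc[slice(breaks[i], breaks[i + 1])]       # one inter-relocation segment
        x = pd.to_numeric(seg.index).values.reshape(-1, 1)
        model = LinearRegression().fit(x, seg.values)
        pieces.append(pd.Series(model.predict(x), index=seg.index))

    out = pd.concat(pieces)
    print(out.round(3).values)  # two linear pieces, the jump at the break is kept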
@@ -1065,55 +1065,55 @@ def calculate_tubulent_heat_fluxes(T_0, T_h, Tsurf_h, WS_h, z_WS, z_T, q_h, p_h,
     p_h : xarray.DataArray
         Air pressure
     kappa : int
-        Von Karman constant (0.35-0.42). Default is 0.4.
+        Von Karman constant (0.35-0.42). Default is 0.4.
     WS_lim : int
-        Default is 1.
+        Default is 1.
     z_0 : int
-        Aerodynamic surface roughness length for momention, assumed constant
+        Aerodynamic surface roughness length for momention, assumed constant
         for all ice/snow surfaces. Default is 0.001.
-    g : int
-        Gravitational acceleration (m/s2). Default is 9.82.
-    es_0 : int
-        Saturation vapour pressure at the melting point (hPa). Default is 6.1071.
-    eps : int
+    g : int
+        Gravitational acceleration (m/s2). Default is 9.82.
+    es_0 : int
+        Saturation vapour pressure at the melting point (hPa). Default is 6.1071.
+    eps : int
         Ratio of molar masses of vapor and dry air (0.622).
     gamma : int
         Flux profile correction (Paulson & Dyer). Default is 16..
-    L_sub : int
+    L_sub : int
         Latent heat of sublimation (J/kg). Default is 2.83e6.
-    L_dif_max : int
-        Default is 0.01.
+    L_dif_max : int
+        Default is 0.01.
     c_pd : int
         Specific heat of dry air (J/kg/K). Default is 1005..
-    aa : int
-        Flux profile correction constants (Holtslag & De Bruin '88). Default is
+    aa : int
+        Flux profile correction constants (Holtslag & De Bruin '88). Default is
         0.7.
-    bb : int
-        Flux profile correction constants (Holtslag & De Bruin '88). Default is
+    bb : int
+        Flux profile correction constants (Holtslag & De Bruin '88). Default is
         0.75.
     cc : int
-        Flux profile correction constants (Holtslag & De Bruin '88). Default is
+        Flux profile correction constants (Holtslag & De Bruin '88). Default is
         5.
     dd : int
-        Flux profile correction constants (Holtslag & De Bruin '88). Default is
+        Flux profile correction constants (Holtslag & De Bruin '88). Default is
         0.35.
-    R_d : int
+    R_d : int
         Gas constant of dry air. Default is 287.05.
-
+
     Returns
     -------
     SHF_h : xarray.DataArray
         Sensible heat flux
     LHF_h : xarray.DataArray
         Latent heat flux
-    '''
-    rho_atm = 100 * p_h / R_d / (T_h + T_0) # Calculate atmospheric density
-    nu = calculate_viscosity(T_h, T_0, rho_atm) # Calculate kinematic viscosity
-
+    '''
+    rho_atm = 100 * p_h / R_d / (T_h + T_0) # Calculate atmospheric density
+    nu = calculate_viscosity(T_h, T_0, rho_atm) # Calculate kinematic viscosity
+
     SHF_h = xr.zeros_like(T_h) # Create empty xarrays
     LHF_h = xr.zeros_like(T_h)
     L = xr.full_like(T_h, 1E5)
-
+
     u_star = kappa * WS_h.where(WS_h>0) / np.log(z_WS / z_0) # Rough surfaces, from Smeets & Van den Broeke 2008
     Re = u_star * z_0 / nu
     z_0h = u_star
@@ -1126,12 +1126,12 @@ def calculate_tubulent_heat_fluxes(T_0, T_h, Tsurf_h, WS_h, z_WS, z_T, q_h, p_h,
                          * (1 - (Tsurf_h + T_0) / T_0)
                          + np.log10(es_0))
     q_surf = eps * es_ice_surf / (p_h - (1 - eps) * es_ice_surf)
-    theta = T_h + z_T *g / c_pd
+    theta = T_h + z_T *g / c_pd
     stable = (theta > Tsurf_h) & (WS_h > WS_lim)
     unstable = (theta < Tsurf_h) & (WS_h > WS_lim) #TODO: check if unstable = ~stable? And if not why not
-    #no_wind = (WS_h <= WS_lim)
+    #no_wind = (WS_h <= WS_lim)
     # Calculate stable stratification
-    for i in np.arange(0,31):
+    for i in np.arange(0,31):
         psi_m1 = -(aa* z_0/L[stable] + bb*( z_0/L[stable]-cc/dd)*np.exp(-dd* z_0/L[stable]) + bb*cc/dd)
         psi_m2 = -(aa*z_WS[stable]/L[stable] + bb*(z_WS[stable]/L[stable]-cc/dd)*np.exp(-dd*z_WS[stable]/L[stable]) + bb*cc/dd)
         psi_h1 = -(aa*z_0h[stable]/L[stable] + bb*(z_0h[stable]/L[stable]-cc/dd)*np.exp(-dd*z_0h[stable]/L[stable]) + bb*cc/dd)
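The stability split above compares potential temperature at instrument height, theta = T_h + z_T*g/c_pd, with the surface temperature, and only classifies points where wind speed exceeds WS_lim. A scalar sanity check using the defaults g=9.82 and c_pd=1005. from the function signature:

    g, c_pd, WS_lim = 9.82, 1005.0, 1.0   # defaults from calculate_tubulent_heat_fluxes

    T_h, Tsurf_h = -10.0, -12.0           # air warmer than surface (deg C)
    z_T, WS_h = 2.6, 4.0                  # thermometer height (m), wind speed (m/s)

    theta = T_h + z_T * g / c_pd          # dry-adiabatic correction, ~0.025 K here
    stable = (theta > Tsurf_h) and (WS_h > WS_lim)
    unstable = (theta < Tsurf_h) and (WS_h > WS_lim)
    print(round(theta, 4), stable, unstable)  # -9.9746 True False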
@@ -1139,8 +1139,8 @@ def calculate_tubulent_heat_fluxes(T_0, T_h, Tsurf_h, WS_h, z_WS, z_T, q_h, p_h,
         u_star[stable] = kappa*WS_h[stable]/(np.log(z_WS[stable]/z_0)-psi_m2+psi_m1)
         Re[stable] = u_star[stable]*z_0/nu[stable]
         z_0h[stable] = z_0*np.exp(1.5-0.2*np.log(Re[stable])-0.11*(np.log(Re[stable]))**2)
-
-        # If n_elements(where(z_0h[stable] < 1e-6)) get 1 then
+
+        # If n_elements(where(z_0h[stable] < 1e-6)) get 1 then
         #     z_0h[stable[where(z_0h[stable] < 1e-6)]] = 1e-6
         z_0h[stable][z_0h[stable] < 1E-6] == 1E-6
         th_star = kappa \
@@ -1156,12 +1156,12 @@ def calculate_tubulent_heat_fluxes(T_0, T_h, Tsurf_h, WS_h, z_WS, z_T, q_h, p_h,
             * (1 + ((1-eps) / eps) * q_h[stable]) \
             / (g * kappa * th_star * (1 + ((1-eps)/eps) * q_star))
         L_dif = np.abs((L_prev-L[stable])/L_prev)
-
+
         # If n_elements(where(L_dif > L_dif_max)) eq 1 then break
         if np.all(L_dif <= L_dif_max):
             break
 
-    # Calculate unstable stratification
+    # Calculate unstable stratification
     if len(unstable) > 0:
         for i in np.arange(0,21):
             x1 = (1-gamma*z_0 /L[unstable])**0.25
@@ -1176,8 +1176,8 @@ def calculate_tubulent_heat_fluxes(T_0, T_h, Tsurf_h, WS_h, z_WS, z_T, q_h, p_h,
             Re[unstable] = u_star[unstable]*z_0/nu[unstable]
             z_0h[unstable] = z_0 * np.exp(1.5 - 0.2 * np.log(Re[unstable]) - 0.11 \
                                           * (np.log(Re[unstable]))**2)
-
-            # If n_elements(where(z_0h[unstable] < 1e-6)) > 1 then
+
+            # If n_elements(where(z_0h[unstable] < 1e-6)) > 1 then
             #     z_0h[unstable[where(z_0h[unstable] < 1e-6)]] = 1e-6
             z_0h[stable][z_0h[stable] < 1E-6] == 1E-6
             th_star = kappa * (theta[unstable] - Tsurf_h[unstable]) \
@@ -1191,7 +1191,7 @@ def calculate_tubulent_heat_fluxes(T_0, T_h, Tsurf_h, WS_h, z_WS, z_T, q_h, p_h,
                 * ( 1 + ((1-eps) / eps) * q_h[unstable]) \
                 / (g * kappa * th_star * ( 1 + ((1-eps) / eps) * q_star))
             L_dif = abs((L_prev-L[unstable])/L_prev)
-
+
             # If n_elements(where(L_dif > L_dif_max)) eq 1 then break
             if np.all(L_dif <= L_dif_max):
                 break
@@ -1199,12 +1199,12 @@ def calculate_tubulent_heat_fluxes(T_0, T_h, Tsurf_h, WS_h, z_WS, z_T, q_h, p_h,
     HF_nan = np.isnan(p_h) | np.isnan(T_h) | np.isnan(Tsurf_h) \
         | np.isnan(q_h) | np.isnan(WS_h) | np.isnan(z_T)
     SHF_h[HF_nan] = np.nan
-    LHF_h[HF_nan] = np.nan
+    LHF_h[HF_nan] = np.nan
     return SHF_h, LHF_h
 
-def calculate_viscosity(T_h, T_0, rho_atm):
+def calculate_viscosity(T_h, T_0, rho_atm):
     '''Calculate kinematic viscosity of air
-
+
     Parameters
     ----------
     T_h : xarray.DataArray
@@ -1213,7 +1213,7 @@ def calculate_viscosity(T_h, T_0, rho_atm):
         Steam point temperature
     rho_atm : xarray.DataArray
         Surface temperature
-
+
     Returns
     -------
     xarray.DataArray
@@ -1221,15 +1221,15 @@ def calculate_viscosity(T_h, T_0, rho_atm):
     '''
     # Dynamic viscosity of air in Pa s (Sutherlands' equation using C = 120 K)
     mu = 18.27e-6 * (291.15 + 120) / ((T_h + T_0) + 120) * ((T_h + T_0) / 291.15)**1.5
-
+
     # Kinematic viscosity of air in m^2/s
-    return mu / rho_atm
+    return mu / rho_atm
 
-def calculate_specific_humidity(T_0, T_100, T_h, p_h, RH_cor_h, es_0=6.1071, es_100=1013.246, eps=0.622):
+def calculate_specific_humidity(T_0, T_100, T_h, p_h, rh_h_wrt_ice_or_water, es_0=6.1071, es_100=1013.246, eps=0.622):
     '''Calculate specific humidity
     Parameters
     ----------
-    T_0 : float
+    T_0 : float
         Steam point temperature. Default is 273.15.
     T_100 : float
         Steam point temperature in Kelvin
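The Sutherland-type viscosity expression in the hunk above can be spot-checked at the melting point: with T_h = 0 °C it yields a dynamic viscosity near 1.74e-5 Pa s, a familiar textbook value for air. A quick numeric check; the air density is invented for the example:

    T_h, T_0 = 0.0, 273.15     # air temperature (deg C), freezing point (K)
    rho_atm = 1.3              # example near-surface air density (kg/m3)

    # dynamic viscosity, Sutherland's equation with C = 120 K (as in the code above)
    mu = 18.27e-6 * (291.15 + 120) / ((T_h + T_0) + 120) * ((T_h + T_0) / 291.15)**1.5
    print(mu, mu / rho_atm)    # ~1.74e-05 Pa s, ~1.34e-05 m2/s (kinematic)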
@@ -1237,20 +1237,20 @@ def calculate_specific_humidity(T_0, T_100, T_h, p_h, RH_cor_h, es_0=6.1071, es_
         Air temperature
     p_h : xarray.DataArray
         Air pressure
-    RH_cor_h : xarray.DataArray
+    rh_h_wrt_ice_or_water : xarray.DataArray
         Relative humidity corrected
     es_0 : float
         Saturation vapour pressure at the melting point (hPa)
     es_100 : float
         Saturation vapour pressure at steam point temperature (hPa)
-    eps : int
+    eps : int
         ratio of molar masses of vapor and dry air (0.622)
-
+
     Returns
     -------
     xarray.DataArray
         Specific humidity data array
-    '''
+    '''
     # Saturation vapour pressure above 0 C (hPa)
     es_wtr = 10**(-7.90298 * (T_100 / (T_h + T_0) - 1) + 5.02808 * np.log10(T_100 / (T_h + T_0))
                   - 1.3816E-7 * (10**(11.344 * (1 - (T_h + T_0) / T_100)) - 1)
@@ -1260,21 +1260,21 @@ def calculate_specific_humidity(T_0, T_100, T_h, p_h, RH_cor_h, es_0=6.1071, es_
     es_ice = 10**(-9.09718 * (T_0 / (T_h + T_0) - 1) - 3.56654
                   * np.log10(T_0 / (T_h + T_0)) + 0.876793
                   * (1 - (T_h + T_0) / T_0)
-                  + np.log10(es_0))
+                  + np.log10(es_0))
 
     # Specific humidity at saturation (incorrect below melting point)
-    q_sat = eps * es_wtr / (p_h - (1 - eps) * es_wtr)
-
+    q_sat = eps * es_wtr / (p_h - (1 - eps) * es_wtr)
+
     # Replace saturation specific humidity values below melting point
-    freezing = T_h < 0
+    freezing = T_h < 0
     q_sat[freezing] = eps * es_ice[freezing] / (p_h[freezing] - (1 - eps) * es_ice[freezing])
-
+
     q_nan = np.isnan(T_h) | np.isnan(p_h)
     q_sat[q_nan] = np.nan
 
     # Convert to kg/kg
-    return RH_cor_h * q_sat / 100
-
-if __name__ == "__main__":
-    # unittest.main()
-    pass
+    return rh_h_wrt_ice_or_water * q_sat / 100
+
+if __name__ == "__main__":
+    # unittest.main()
+    pass
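Taken together, calculate_specific_humidity evaluates Goff-Gratch-type saturation curves (over water above 0 °C, over ice below) and scales the saturation specific humidity by the renamed relative-humidity input. A scalar check of the sub-freezing branch with numpy; the meteorological values are invented:

    import numpy as np

    T_0, es_0, eps = 273.15, 6.1071, 0.622
    T_h = np.array([-15.0])    # air temperature (deg C)
    p_h = np.array([870.0])    # air pressure (hPa)
    rh = np.array([80.0])      # relative humidity w.r.t. ice or water (%)

    # saturation vapour pressure over ice (hPa), as in the code above
    es_ice = 10**(-9.09718 * (T_0 / (T_h + T_0) - 1) - 3.56654
                  * np.log10(T_0 / (T_h + T_0)) + 0.876793
                  * (1 - (T_h + T_0) / T_0) + np.log10(es_0))

    q_sat = eps * es_ice / (p_h - (1 - eps) * es_ice)  # saturation specific humidity (kg/kg)
    print(rh * q_sat / 100)    # ~9.4e-04 kg/kg, i.e. ~0.94 g/kg before the g/kg conversion in toL3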