cloudnetpy 1.55.20__py3-none-any.whl → 1.55.22__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (95)
  1. cloudnetpy/categorize/atmos.py +46 -14
  2. cloudnetpy/categorize/atmos_utils.py +11 -1
  3. cloudnetpy/categorize/categorize.py +38 -21
  4. cloudnetpy/categorize/classify.py +31 -9
  5. cloudnetpy/categorize/containers.py +19 -7
  6. cloudnetpy/categorize/droplet.py +24 -8
  7. cloudnetpy/categorize/falling.py +17 -7
  8. cloudnetpy/categorize/freezing.py +19 -5
  9. cloudnetpy/categorize/insects.py +27 -14
  10. cloudnetpy/categorize/lidar.py +38 -36
  11. cloudnetpy/categorize/melting.py +19 -9
  12. cloudnetpy/categorize/model.py +28 -9
  13. cloudnetpy/categorize/mwr.py +4 -2
  14. cloudnetpy/categorize/radar.py +58 -22
  15. cloudnetpy/cloudnetarray.py +15 -6
  16. cloudnetpy/concat_lib.py +39 -16
  17. cloudnetpy/constants.py +7 -0
  18. cloudnetpy/datasource.py +39 -19
  19. cloudnetpy/instruments/basta.py +6 -2
  20. cloudnetpy/instruments/campbell_scientific.py +33 -16
  21. cloudnetpy/instruments/ceilo.py +30 -13
  22. cloudnetpy/instruments/ceilometer.py +76 -37
  23. cloudnetpy/instruments/cl61d.py +8 -3
  24. cloudnetpy/instruments/cloudnet_instrument.py +2 -1
  25. cloudnetpy/instruments/copernicus.py +27 -14
  26. cloudnetpy/instruments/disdrometer/common.py +51 -32
  27. cloudnetpy/instruments/disdrometer/parsivel.py +79 -48
  28. cloudnetpy/instruments/disdrometer/thies.py +10 -6
  29. cloudnetpy/instruments/galileo.py +23 -12
  30. cloudnetpy/instruments/hatpro.py +27 -11
  31. cloudnetpy/instruments/instruments.py +4 -1
  32. cloudnetpy/instruments/lufft.py +20 -11
  33. cloudnetpy/instruments/mira.py +60 -49
  34. cloudnetpy/instruments/mrr.py +31 -20
  35. cloudnetpy/instruments/nc_lidar.py +15 -6
  36. cloudnetpy/instruments/nc_radar.py +31 -22
  37. cloudnetpy/instruments/pollyxt.py +36 -21
  38. cloudnetpy/instruments/radiometrics.py +32 -18
  39. cloudnetpy/instruments/rpg.py +48 -22
  40. cloudnetpy/instruments/rpg_reader.py +39 -30
  41. cloudnetpy/instruments/vaisala.py +39 -27
  42. cloudnetpy/instruments/weather_station.py +15 -11
  43. cloudnetpy/metadata.py +3 -1
  44. cloudnetpy/model_evaluation/file_handler.py +31 -21
  45. cloudnetpy/model_evaluation/metadata.py +3 -1
  46. cloudnetpy/model_evaluation/model_metadata.py +1 -1
  47. cloudnetpy/model_evaluation/plotting/plot_tools.py +20 -15
  48. cloudnetpy/model_evaluation/plotting/plotting.py +114 -64
  49. cloudnetpy/model_evaluation/products/advance_methods.py +48 -28
  50. cloudnetpy/model_evaluation/products/grid_methods.py +44 -19
  51. cloudnetpy/model_evaluation/products/model_products.py +22 -18
  52. cloudnetpy/model_evaluation/products/observation_products.py +15 -9
  53. cloudnetpy/model_evaluation/products/product_resampling.py +14 -4
  54. cloudnetpy/model_evaluation/products/tools.py +16 -7
  55. cloudnetpy/model_evaluation/statistics/statistical_methods.py +28 -15
  56. cloudnetpy/model_evaluation/tests/e2e/conftest.py +3 -3
  57. cloudnetpy/model_evaluation/tests/e2e/process_cf/main.py +9 -5
  58. cloudnetpy/model_evaluation/tests/e2e/process_cf/tests.py +14 -13
  59. cloudnetpy/model_evaluation/tests/e2e/process_iwc/main.py +9 -5
  60. cloudnetpy/model_evaluation/tests/e2e/process_iwc/tests.py +14 -13
  61. cloudnetpy/model_evaluation/tests/e2e/process_lwc/main.py +9 -5
  62. cloudnetpy/model_evaluation/tests/e2e/process_lwc/tests.py +14 -13
  63. cloudnetpy/model_evaluation/tests/unit/conftest.py +11 -11
  64. cloudnetpy/model_evaluation/tests/unit/test_advance_methods.py +33 -27
  65. cloudnetpy/model_evaluation/tests/unit/test_grid_methods.py +83 -83
  66. cloudnetpy/model_evaluation/tests/unit/test_model_products.py +23 -21
  67. cloudnetpy/model_evaluation/tests/unit/test_observation_products.py +24 -25
  68. cloudnetpy/model_evaluation/tests/unit/test_plot_tools.py +40 -39
  69. cloudnetpy/model_evaluation/tests/unit/test_plotting.py +12 -11
  70. cloudnetpy/model_evaluation/tests/unit/test_statistical_methods.py +30 -30
  71. cloudnetpy/model_evaluation/tests/unit/test_tools.py +18 -17
  72. cloudnetpy/model_evaluation/utils.py +3 -2
  73. cloudnetpy/output.py +45 -19
  74. cloudnetpy/plotting/plot_meta.py +35 -11
  75. cloudnetpy/plotting/plotting.py +172 -104
  76. cloudnetpy/products/classification.py +20 -8
  77. cloudnetpy/products/der.py +25 -10
  78. cloudnetpy/products/drizzle.py +41 -26
  79. cloudnetpy/products/drizzle_error.py +10 -5
  80. cloudnetpy/products/drizzle_tools.py +43 -24
  81. cloudnetpy/products/ier.py +10 -5
  82. cloudnetpy/products/iwc.py +16 -9
  83. cloudnetpy/products/lwc.py +34 -12
  84. cloudnetpy/products/mwr_multi.py +4 -1
  85. cloudnetpy/products/mwr_single.py +4 -1
  86. cloudnetpy/products/product_tools.py +33 -10
  87. cloudnetpy/utils.py +175 -74
  88. cloudnetpy/version.py +1 -1
  89. {cloudnetpy-1.55.20.dist-info → cloudnetpy-1.55.22.dist-info}/METADATA +11 -10
  90. cloudnetpy-1.55.22.dist-info/RECORD +114 -0
  91. docs/source/conf.py +2 -2
  92. cloudnetpy-1.55.20.dist-info/RECORD +0 -114
  93. {cloudnetpy-1.55.20.dist-info → cloudnetpy-1.55.22.dist-info}/LICENSE +0 -0
  94. {cloudnetpy-1.55.20.dist-info → cloudnetpy-1.55.22.dist-info}/WHEEL +0 -0
  95. {cloudnetpy-1.55.20.dist-info → cloudnetpy-1.55.22.dist-info}/top_level.txt +0 -0
cloudnetpy/instruments/disdrometer/parsivel.py

@@ -3,15 +3,16 @@ import datetime
 import logging
 import re
 from collections import defaultdict
-from collections.abc import Callable, Iterator, Sequence
+from collections.abc import Callable, Iterable, Iterator, Sequence
 from itertools import islice
 from os import PathLike
-from typing import Any, Iterable, Literal
+from typing import Any, Literal
 
 import numpy as np
 
 from cloudnetpy import output
 from cloudnetpy.cloudnetarray import CloudnetArray
+from cloudnetpy.constants import MM_TO_M, SEC_IN_HOUR
 from cloudnetpy.exceptions import DisdrometerDataError
 from cloudnetpy.instruments import instruments
 from cloudnetpy.instruments.cloudnet_instrument import CloudnetInstrument
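A note on the import shuffle in this hunk: since Python 3.9 the `typing` versions of the container ABCs are deprecated aliases, so moving `Iterable` next to `Callable`, `Iterator` and `Sequence` under `collections.abc` is the forward-compatible spelling. A minimal sketch of the resulting style (the `scale` function is hypothetical):

    from collections.abc import Iterable
    from typing import Literal

    # ABCs now come from collections.abc; typing keeps only names with no
    # runtime counterpart, such as Literal and Any.
    def scale(values: Iterable[float], method: Literal["divide", "add"]) -> list[float]:
        return [v / 2 if method == "divide" else v + 2 for v in values]

    assert scale([2.0, 4.0], "divide") == [1.0, 2.0]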
@@ -32,6 +33,7 @@ def parsivel2nc(
         file.
 
     Args:
+    ----
         disdrometer_file: Filename of disdrometer file or list of filenames.
         output_file: Output filename.
         site_meta: Dictionary containing information about the site. Required key
@@ -45,13 +47,16 @@ def parsivel2nc(
         timestamps:
 
     Returns:
+    -------
         UUID of the generated file.
 
     Raises:
+    ------
         DisdrometerDataError: Timestamps do not match the expected date, or unable
             to read the disdrometer file.
 
     Examples:
+    --------
         >>> from cloudnetpy.instruments import parsivel2nc
         >>> site_meta = {'name': 'Lindenberg', 'altitude': 104, 'latitude': 52.2,
             'longitude': 14.1}
@@ -69,8 +74,7 @@ def parsivel2nc(
     disdrometer.add_meta()
     attributes = output.add_time_attribute(ATTRIBUTES, disdrometer.date)
     output.update_attributes(disdrometer.data, attributes)
-    uuid = output.save_level1b(disdrometer, output_file, uuid)
-    return uuid
+    return output.save_level1b(disdrometer, output_file, uuid)
 
 
 class Parsivel(CloudnetInstrument):
@@ -95,74 +99,77 @@ class Parsivel(CloudnetInstrument):
         self._create_velocity_vectors()
         self._create_diameter_vectors()
 
-    def _screen_time(self, expected_date: datetime.date | None = None):
+    def _screen_time(self, expected_date: datetime.date | None = None) -> None:
         if expected_date is None:
             self.date = self.raw_data["time"][0].astype(object).date()
             return
         self.date = expected_date
         valid_mask = self.raw_data["time"].astype("datetime64[D]") == self.date
         if np.count_nonzero(valid_mask) == 0:
-            raise DisdrometerDataError(f"No data found on {expected_date}")
+            msg = f"No data found on {expected_date}"
+            raise DisdrometerDataError(msg)
         for key in self.raw_data:
             self.raw_data[key] = self.raw_data[key][valid_mask]
 
-    def _append_data(self):
+    def _append_data(self) -> None:
         for key, values in self.raw_data.items():
             if key.startswith("_"):
                 continue
+            name = key
+            values_out = values
             match key:
                 case "spectrum":
-                    key = "data_raw"
+                    name = "data_raw"
                     dimensions = ["time", "diameter", "velocity"]
                 case "number_concentration" | "fall_velocity":
                     dimensions = ["time", "diameter"]
                 case "time":
                     dimensions = []
                     base = values[0].astype("datetime64[D]")
-                    values = (values - base) / np.timedelta64(1, "h")
+                    values_out = (values - base) / np.timedelta64(1, "h")
                 case _:
                     dimensions = ["time"]
-            self.data[key] = CloudnetArray(values, key, dimensions=dimensions)
+            self.data[name] = CloudnetArray(values_out, name, dimensions=dimensions)
         if "_sensor_id" in self.raw_data:
             first_id = self.raw_data["_sensor_id"][0]
             for sensor_id in self.raw_data["_sensor_id"]:
                 if sensor_id != first_id:
-                    raise DisdrometerDataError("Multiple sensor IDs are not supported")
+                    msg = "Multiple sensor IDs are not supported"
+                    raise DisdrometerDataError(msg)
             self.serial_number = first_id
 
-    def _create_velocity_vectors(self):
+    def _create_velocity_vectors(self) -> None:
         n_values = [10, 5, 5, 5, 5, 2]
         spreads = [0.1, 0.2, 0.4, 0.8, 1.6, 3.2]
         Disdrometer.store_vectors(self.data, n_values, spreads, "velocity")
 
-    def _create_diameter_vectors(self):
+    def _create_diameter_vectors(self) -> None:
         n_values = [10, 5, 5, 5, 5, 2]
         spreads = [0.125, 0.25, 0.5, 1, 2, 3]
         Disdrometer.store_vectors(self.data, n_values, spreads, "diameter")
 
-    def convert_units(self):
-        mm_to_m = 1e3
-        mmh_to_ms = 3600 * mm_to_m
+    def convert_units(self) -> None:
+        mmh_to_ms = SEC_IN_HOUR / MM_TO_M
         c_to_k = 273.15
         self._convert_data(("rainfall_rate",), mmh_to_ms)
         self._convert_data(("snowfall_rate",), mmh_to_ms)
-        self._convert_data(("diameter", "diameter_spread", "diameter_bnds"), mm_to_m)
+        self._convert_data(("diameter", "diameter_spread", "diameter_bnds"), 1e3)
         self._convert_data(("V_sensor_supply",), 10)
         self._convert_data(("T_sensor",), c_to_k, method="add")
 
-    def add_meta(self):
+    def add_meta(self) -> None:
         valid_keys = ("latitude", "longitude", "altitude")
         for key, value in self.site_meta.items():
-            key = key.lower()
-            if key in valid_keys:
-                self.data[key] = CloudnetArray(float(value), key)
+            name = key.lower()
+            if name in valid_keys:
+                self.data[name] = CloudnetArray(float(value), name)
 
     def _convert_data(
         self,
         keys: tuple[str, ...],
         value: float,
         method: Literal["divide", "add"] = "divide",
-    ):
+    ) -> None:
        for key in keys:
            if key not in self.data:
                continue
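The rewritten `convert_units` deserves a quick sanity check: `_convert_data` divides by the given factor, and assuming the new constants are `SEC_IN_HOUR = 3600` and `MM_TO_M = 1e-3` (values implied only by the names; `cloudnetpy/constants.py` gains seven lines in this release), `SEC_IN_HOUR / MM_TO_M` reproduces the old literal `3600 * 1e3`:

    import math

    # Assumed constant values, inferred from the names alone.
    SEC_IN_HOUR = 3600
    MM_TO_M = 1e-3

    mmh_to_ms = SEC_IN_HOUR / MM_TO_M    # ~3.6e6, numerically the old 3600 * 1e3
    rate_m_s = 36.0 / mmh_to_ms          # _convert_data divides by the factor
    assert math.isclose(rate_m_s, 1e-5)  # 36 mm h-1 is 1e-5 m s-1

The diameter conversion now passes the literal `1e3` instead of the misnamed local `mm_to_m`: since `_convert_data` divides, dividing by 1e3 is exactly the mm-to-m step.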
@@ -298,9 +305,11 @@ def _parse_date(tokens: Iterator[str]) -> datetime.date:
     elif "." in token:
         day, month, year = token.split(".")
     else:
-        raise ValueError(f"Unsupported date: '{input}'")
+        msg = f"Unsupported date: '{input}'"
+        raise ValueError(msg)
     if len(year) != 4:
-        raise ValueError(f"Unsupported date: '{input}'")
+        msg = f"Unsupported date: '{input}'"
+        raise ValueError(msg)
     return datetime.date(int(year), int(month), int(day))
 
 
@@ -318,7 +327,14 @@ def _parse_datetime(tokens: Iterator[str]) -> datetime.datetime:
     hour = int(token[8:10])
     minute = int(token[10:12])
     second = int(token[12:14])
-    return datetime.datetime(year, month, day, hour, minute, second)
+    return datetime.datetime(
+        year,
+        month,
+        day,
+        hour,
+        minute,
+        second,
+    )
 
 
 def _parse_vector(tokens: Iterator[str]) -> np.ndarray:
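For orientation, `_parse_datetime` slices a compact timestamp token; assuming the lines not shown in the hunk read year, month and day from the leading characters in the same way, a hypothetical round trip looks like:

    import datetime

    token = "20230817123456"  # hypothetical YYYYMMDDhhmmss telegram token
    parsed = datetime.datetime(
        int(token[0:4]),    # year  (assumed, not shown in the hunk)
        int(token[4:6]),    # month (assumed, not shown in the hunk)
        int(token[6:8]),    # day   (assumed, not shown in the hunk)
        int(token[8:10]),   # hour
        int(token[10:12]),  # minute
        int(token[12:14]),  # second
    )
    assert parsed == datetime.datetime(2023, 8, 17, 12, 34, 56)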
@@ -333,13 +349,15 @@ def _parse_spectrum(tokens: Iterator[str]) -> np.ndarray:
         raw = [first.removeprefix("<SPECTRUM>")]
         raw.extend(islice(tokens, 1023))
         if next(tokens) != "</SPECTRUM>":
-            raise ValueError("Invalid spectrum format")
+            msg = "Invalid spectrum format"
+            raise ValueError(msg)
         values = [int(x) if x != "" else 0 for x in raw]
     else:
         values = [int(first)]
         values.extend(int(x) for x in islice(tokens, 1023))
     if len(values) != 1024:
-        raise ValueError("Invalid length")
+        msg = f"Invalid spectrum length: {len(values)}"
+        raise ValueError(msg)
     return np.array(values, dtype="i2").reshape((32, 32))
 
 
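The `<SPECTRUM>` branch above accepts empty tokens, since Parsivel telegrams can leave empty bins between separators, and always expects exactly 32 x 32 = 1024 counts. A standalone sketch of that behaviour with a made-up token stream:

    import numpy as np

    # Hypothetical token stream: empty strings stand for zero counts.
    raw = ["", "3", "", "1"] + [""] * 1020
    values = [int(x) if x != "" else 0 for x in raw]
    assert len(values) == 1024
    spectrum = np.array(values, dtype="i2").reshape((32, 32))
    assert int(spectrum.sum()) == 4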
@@ -391,32 +409,37 @@ def _read_rows(headers: list[str], rows: list[str]) -> dict[str, list]:
         if row == "":
             continue
         try:
-            tokens = iter(row.removesuffix(";").split(";"))
-            parsed = [PARSERS.get(header, next)(tokens) for header in headers]
-            unread_tokens = list(tokens)
-            if unread_tokens:
-                raise ValueError("More values than expected")
-            for header, value in zip(headers, parsed):
+            parsed = _parse_row(row, headers)
+            for header, value in zip(headers, parsed, strict=True):
                 result[header].append(value)
         except (ValueError, StopIteration):
             invalid_rows += 1
             continue
     if invalid_rows == len(rows):
-        raise DisdrometerDataError("No valid data in file")
+        msg = "No valid data in file"
+        raise DisdrometerDataError(msg)
     if invalid_rows > 0:
-        logging.info(f"Skipped {invalid_rows} invalid rows")
+        logging.info("Skipped %s invalid rows", invalid_rows)
     return result
 
 
+def _parse_row(row_in: str, headers: list[str]) -> list:
+    tokens = iter(row_in.removesuffix(";").split(";"))
+    parsed = [PARSERS.get(header, next)(tokens) for header in headers]
+    if unread_tokens := list(tokens):
+        msg = f"Unused tokens: {unread_tokens}"
+        raise ValueError(msg)
+    return parsed
+
+
 def _read_toa5(filename: str | PathLike) -> dict[str, list]:
-    """
-    Read ASCII data from Campbell Scientific datalogger such as CR1000.
+    """Read ASCII data from Campbell Scientific datalogger such as CR1000.
 
-    References:
+    References
+    ----------
         CR1000 Measurement and Control System.
         https://s.campbellsci.com/documents/us/manuals/cr1000.pdf
     """
-    # pylint: disable=too-many-branches,comparison-with-callable
     with open(filename, encoding="latin1", errors="ignore") as file:
         reader = csv.reader(file)
         _origin_line = next(reader)
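Two details in the extracted `_parse_row` are easy to miss: the walrus expression raises on leftover tokens, and back in `_read_rows` the `zip(..., strict=True)` (Python 3.10+) turns a header/value length mismatch into a `ValueError`, which the caller counts as one more invalid row. A self-contained sketch with a hypothetical PARSERS mapping:

    # Hypothetical stand-in for the module-level PARSERS table.
    PARSERS = {"rainfall_rate": lambda tokens: float(next(tokens))}

    def parse_row(row: str, headers: list[str]) -> list:
        tokens = iter(row.removesuffix(";").split(";"))
        parsed = [PARSERS.get(header, next)(tokens) for header in headers]
        if unread := list(tokens):  # leftover fields mean a malformed row
            msg = f"Unused tokens: {unread}"
            raise ValueError(msg)
        return parsed

    assert parse_row("1.5;ok;", ["rainfall_rate", "status"]) == [1.5, "ok"]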
@@ -438,12 +461,13 @@ def _read_toa5(filename: str | PathLike) -> dict[str, list]:
         "spectrum": [],
     }
     try:
-        for header, value in zip(headers, data_line):
+        for header, value in zip(headers, data_line, strict=True):
             if header is None:
                 continue
             if header == "_datetime":
                 scalars[header] = datetime.datetime.strptime(
-                    value, "%Y-%m-%d %H:%M:%S"
+                    value,
+                    "%Y-%m-%d %H:%M:%S",
                 )
             elif header in ("number_concentration", "fall_velocity"):
                 arrays[header].append(float(value))
@@ -460,21 +484,26 @@ def _read_toa5(filename: str | PathLike) -> dict[str, list]:
             data[header].append(scalar)
         if "spectrum" in headers:
             data["spectrum"].append(
-                np.array(arrays["spectrum"], dtype="i2").reshape((32, 32))
+                np.array(arrays["spectrum"], dtype="i2").reshape((32, 32)),
             )
         if "number_concentration" in headers:
             data["number_concentration"].append(arrays["number_concentration"])
         if "fall_velocity" in headers:
             data["fall_velocity"].append(arrays["fall_velocity"])
     if n_invalid_rows == n_rows:
-        raise DisdrometerDataError("No valid data in file")
+        msg = "No valid data in file"
+        raise DisdrometerDataError(msg)
     if n_invalid_rows > 0:
-        logging.info(f"Skipped {n_invalid_rows} invalid rows")
+        logging.info("Skipped %s invalid rows", n_invalid_rows)
     return data
 
 
 def _read_typ_op4a(filename: str | PathLike) -> dict[str, list]:
-    """Read file starting with "TYP OP4A" that contains one measurement."""
+    """Read output of "CS/PA" command. The output starts with line "TYP OP4A"
+    followed by one line per measured variable in format: <number>:<value>.
+    Output ends with characters: <ETX><CR><LF><NUL>. Lines are separated by
+    <CR><LF>.
+    """
     data = {}
     with open(filename, encoding="latin1", errors="ignore") as file:
         for line in file:
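Given the expanded `_read_typ_op4a` docstring, a made-up "CS/PA" payload and one way to split its lines might look like this (field numbers and values are invented for illustration; the real parser may differ):

    # Hypothetical "TYP OP4A" output: one <number>:<value> pair per line.
    payload = "TYP OP4A\r\n01:0000.000\r\n02:0000.00\r\n18:16:31:48\r\n"

    data = {}
    for line in payload.splitlines():
        if ":" not in line:
            continue  # skip the "TYP OP4A" banner line
        key, _, value = line.partition(":")
        data[key] = value

    assert data["01"] == "0000.000"
    assert data["18"] == "16:31:48"  # partition keeps colons inside the value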
@@ -503,7 +532,8 @@ def _read_parsivel(
     with open(filename, encoding="latin1", errors="ignore") as file:
         lines = file.read().splitlines()
     if not lines:
-        raise DisdrometerDataError("File is empty")
+        msg = f"File '{filename}' is empty"
+        raise DisdrometerDataError(msg)
     if "TOA5" in lines[0]:
         data = _read_toa5(filename)
     elif "TYP OP4A" in lines[0]:
@@ -515,11 +545,12 @@ def _read_parsivel(
         headers = _parse_telegram(telegram)
         data = _read_rows(headers, lines)
     else:
-        raise ValueError("telegram must be specified for files without header")
+        msg = "telegram must be specified for files without header"
+        raise ValueError(msg)
     if "_datetime" not in data and timestamps is None:
         data["_datetime"] = [
             datetime.datetime.combine(date, time)
-            for date, time in zip(data["_date"], data["_time"])
+            for date, time in zip(data["_date"], data["_time"], strict=True)
         ]
     for key, values in data.items():
         combined_data[key].extend(values)
cloudnetpy/instruments/disdrometer/thies.py

@@ -15,6 +15,7 @@ def thies2nc(
     """Converts Thies-LNM disdrometer data into Cloudnet Level 1b netCDF file.
 
     Args:
+    ----
         disdrometer_file: Filename of disdrometer .log file.
         output_file: Output filename.
         site_meta: Dictionary containing information about the site. Required key
@@ -23,13 +24,16 @@ def thies2nc(
         date: Expected date of the measurements as YYYY-MM-DD.
 
     Returns:
+    -------
         UUID of the generated file.
 
     Raises:
+    ------
         DisdrometerDataError: Timestamps do not match the expected date, or unable
             to read the disdrometer file.
 
     Examples:
+    --------
         >>> from cloudnetpy.instruments import thies2nc
         >>> site_meta = {'name': 'Lindenberg', 'altitude': 104, 'latitude': 52.2,
             'longitude': 14.1}
@@ -39,7 +43,8 @@ def thies2nc(
     try:
         disdrometer = Thies(disdrometer_file, site_meta)
     except (ValueError, IndexError) as err:
-        raise DisdrometerDataError("Can not read disdrometer file") from err
+        msg = "Unable to read disdrometer file"
+        raise DisdrometerDataError(msg) from err
     if date is not None:
         disdrometer.validate_date(date)
     disdrometer.init_data()
@@ -50,8 +55,7 @@ def thies2nc(
     disdrometer.convert_units()
     attributes = output.add_time_attribute(ATTRIBUTES, disdrometer.date)
     output.update_attributes(disdrometer.data, attributes)
-    uuid = output.save_level1b(disdrometer, output_file, uuid)
-    return uuid
+    return output.save_level1b(disdrometer, output_file, uuid)
 
 
 class Thies(Disdrometer):
@@ -64,7 +68,7 @@ class Thies(Disdrometer):
         self._create_diameter_vectors()
         self.instrument = instruments.THIES
 
-    def init_data(self):
+    def init_data(self) -> None:
         """According to
         https://www.biral.com/wp-content/uploads/2015/01/5.4110.xx_.xxx_.pdf
         """
@@ -127,12 +131,12 @@ class Thies(Disdrometer):
         first_date = _format_thies_date(first_date)
         return first_date.split("-")
 
-    def _create_velocity_vectors(self):
+    def _create_velocity_vectors(self) -> None:
         n_values = [5, 6, 7, 1, 1]
         spreads = [0.2, 0.4, 0.8, 1, 10]
         self.store_vectors(self.data, n_values, spreads, "velocity")
 
-    def _create_diameter_vectors(self):
+    def _create_diameter_vectors(self) -> None:
         n_values = [3, 6, 13]
         spreads = [0.125, 0.25, 0.5]
         self.store_vectors(self.data, n_values, spreads, "diameter", start=0.125)
cloudnetpy/instruments/galileo.py

@@ -1,6 +1,6 @@
 """Module for reading raw Galileo cloud radar data."""
 import os
-from tempfile import TemporaryDirectory
+from tempfile import NamedTemporaryFile, TemporaryDirectory
 
 import numpy as np
 
@@ -20,6 +20,7 @@ def galileo2nc(
     """Converts 'Galileo' cloud radar data into Cloudnet Level 1b netCDF file.
 
     Args:
+    ----
         raw_files: Input file name or folder containing multiple input files.
         output_file: Output filename.
         site_meta: Dictionary containing information about the site. Required key
@@ -29,12 +30,15 @@ def galileo2nc(
         date: Expected date as YYYY-MM-DD of all profiles in the file.
 
     Returns:
+    -------
         UUID of the generated file.
 
     Raises:
+    ------
         ValidTimeStampError: No valid timestamps found.
 
     Examples:
+    --------
         >>> from cloudnetpy.instruments import galileo2nc
         >>> site_meta = {'name': 'Chilbolton'}
         >>> galileo2nc('raw_radar.nc', 'radar.nc', site_meta)
@@ -57,13 +61,20 @@ def galileo2nc(
 
     with TemporaryDirectory() as temp_dir:
         if os.path.isdir(raw_files):
-            nc_filename = f"{temp_dir}/tmp.nc"
-            valid_filenames = utils.get_sorted_filenames(raw_files, ".nc")
-            valid_filenames = utils.get_files_with_common_range(valid_filenames)
-            variables = list(keymap.keys())
-            concat_lib.concatenate_files(
-                valid_filenames, nc_filename, variables=variables
-            )
+            with NamedTemporaryFile(
+                dir=temp_dir,
+                suffix=".nc",
+                delete=False,
+            ) as temp_file:
+                nc_filename = temp_file.name
+                valid_filenames = utils.get_sorted_filenames(raw_files, ".nc")
+                valid_filenames = utils.get_files_with_common_range(valid_filenames)
+                variables = list(keymap.keys())
+                concat_lib.concatenate_files(
+                    valid_filenames,
+                    nc_filename,
+                    variables=variables,
+                )
         else:
             nc_filename = raw_files
 
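The switch from a hard-coded `f"{temp_dir}/tmp.nc"` to `NamedTemporaryFile` yields a collision-free path. The `delete=False` matters here: the handle is only used to reserve a name for `concat_lib.concatenate_files` to write into, and the enclosing `TemporaryDirectory` removes the file with the directory anyway. A condensed sketch of the pattern:

    from tempfile import NamedTemporaryFile, TemporaryDirectory

    with TemporaryDirectory() as temp_dir:
        # delete=False: we only need a unique name; the writer reopens the
        # path itself, and TemporaryDirectory cleans everything up afterwards.
        with NamedTemporaryFile(dir=temp_dir, suffix=".nc", delete=False) as f:
            nc_filename = f.name
        with open(nc_filename, "wb") as out:  # stand-in for the netCDF writer
            out.write(b"\x89HDF")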
@@ -87,14 +98,14 @@ def galileo2nc(
     galileo.add_height()
     attributes = output.add_time_attribute(ATTRIBUTES, galileo.date)
     output.update_attributes(galileo.data, attributes)
-    uuid = output.save_level1b(galileo, output_file, uuid)
-    return uuid
+    return output.save_level1b(galileo, output_file, uuid)
 
 
 class Galileo(ChilboltonRadar):
     """Class for Galileo raw radar data. Child of ChilboltonRadar().
 
     Args:
+    ----
         full_path: Filename of a daily Galileo .nc NetCDF file.
         site_meta: Site properties in a dictionary. Required keys are: `name`.
 
@@ -105,12 +116,12 @@ class Galileo(ChilboltonRadar):
         self.date = self._init_date()
         self.instrument = GALILEO
 
-    def mask_clutter(self):
+    def mask_clutter(self) -> None:
         """Masks clutter."""
         # Only strong Z values are valid
         n_low_gates = 15
         ind = np.where(self.data["Zh"][:, :n_low_gates] < -15) and np.where(
-            self.data["ldr"][:, :n_low_gates] > -5
+            self.data["ldr"][:, :n_low_gates] > -5,
         )
         self.data["v"].mask_indices(ind)
 
cloudnetpy/instruments/hatpro.py

@@ -34,6 +34,7 @@ def hatpro2l1c(
     """Converts RPG HATPRO microwave radiometer data into Cloudnet Level 1c netCDF file.
 
     Args:
+    ----
         mwr_dir: Folder containing one day of HATPRO files.
         output_file: Output file name.
         site_meta: Dictionary containing information about the site and instrument
@@ -41,9 +42,9 @@ def hatpro2l1c(
         date: Expected date in the input files.
 
     Returns:
+    -------
         UUID of the generated file.
     """
-
     coeff_files = site_meta.get("coefficientFiles", None)
 
     hatpro_raw = mwrpy.lev1_to_nc(
@@ -59,9 +60,16 @@ def hatpro2l1c(
     timestamps = hatpro.data["time"][:]
     if date is not None:
         # Screen timestamps if these assertions start to fail
-        assert np.all(np.diff(timestamps) > 0)
-        dates = [str(datetime.datetime.utcfromtimestamp(t).date()) for t in timestamps]
-        assert len(set(dates)) == 1
+        if not np.all(np.diff(timestamps) > 0):
+            msg = "Timestamps are not increasing"
+            raise RuntimeError(msg)
+        dates = [
+            str(datetime.datetime.fromtimestamp(t, tz=datetime.timezone.utc).date())
+            for t in timestamps
+        ]
+        if len(set(dates)) != 1:
+            msg = f"Several dates, something is wrong: {set(dates)}"
+            raise RuntimeError(msg)
 
     decimal_hours = utils.seconds2hours(timestamps)
     hatpro.data["time"] = CloudnetArray(decimal_hours, "time", data_type="f8")
@@ -115,6 +123,7 @@ def hatpro2nc(
     concatenates the data and writes it into netCDF file.
 
     Args:
+    ----
         path_to_files: Folder containing one day of RPG HATPRO files.
         output_file: Output file name.
         site_meta: Dictionary containing information about the site with keys:
@@ -131,15 +140,18 @@ def hatpro2nc(
         only files that match the date will be used.
 
     Returns:
+    -------
         2-element tuple containing
 
         - UUID of the generated file.
         - Files used in the processing.
 
     Raises:
+    ------
         ValidTimeStampError: No valid timestamps found.
 
     Examples:
+    --------
         >>> from cloudnetpy.instruments import hatpro2nc
         >>> site_meta = {'name': 'Hyytiala', 'altitude': 174}
         >>> hatpro2nc('/path/to/files/', 'hatpro.nc', site_meta)
@@ -165,7 +177,8 @@ def hatpro2nc(
 
 
 def _get_hatpro_objects(
-    directory: Path, expected_date: str | None
+    directory: Path,
+    expected_date: str | None,
 ) -> tuple[list[HatproBinCombined], list[str]]:
     objects = defaultdict(list)
     for filename in directory.iterdir():
@@ -183,7 +196,7 @@ def _get_hatpro_objects(
             obj = _validate_date(obj, expected_date)
             objects[filename.stem].append(obj)
         except (TypeError, ValueError, ValidTimeStampError) as err:
-            logging.warning(f"Ignoring file '{filename}': {err}")
+            logging.warning("Ignoring file '%s': %s", filename, err)
             continue
 
     valid_files: list[str] = []
@@ -194,28 +207,31 @@ def _get_hatpro_objects(
             valid_files.extend(str(obj.filename) for obj in objs)
         except (TypeError, ValueError) as err:
             files = "'" + "', '".join(str(obj.filename) for obj in objs) + "'"
-            logging.warning(f"Ignoring files {files}: {err}")
+            logging.warning("Ignoring files %s: %s", files, err)
             continue
 
     return combined_objs, valid_files
 
 
-def _validate_date(obj: HatproBin, expected_date: str):
+def _validate_date(obj: HatproBin, expected_date: str) -> HatproBin:
     if obj.header["_time_reference"] != 1:
-        raise ValueError("Can not validate non-UTC dates")
+        msg = "Can not validate non-UTC dates"
+        raise ValueError(msg)
     inds = []
     for ind, timestamp in enumerate(obj.data["time"][:]):
         date = "-".join(utils.seconds2date(timestamp)[:3])
         if date == expected_date:
             inds.append(ind)
     if not inds:
-        raise ValueError("Timestamps not what expected")
+        msg = f"No valid timestamps found for date {expected_date}"
+        raise ValueError(msg)
     obj.data = obj.data[:][inds]
     return obj
 
 
 def _add_missing_variables(
-    hatpro_objects: list[HatproBinCombined], keys: tuple
+    hatpro_objects: list[HatproBinCombined],
+    keys: tuple,
 ) -> list[HatproBinCombined]:
     for obj in hatpro_objects:
         for key in keys:
cloudnetpy/instruments/instruments.py

@@ -116,7 +116,10 @@ FMCW35 = Instrument(
 )
 
 BASTA = Instrument(
-    domain="radar", category="cloud radar", model="BASTA", frequency=95.0
+    domain="radar",
+    category="cloud radar",
+    model="BASTA",
+    frequency=95.0,
 )
 
 HATPRO = Instrument(