pyxecm 1.6-py3-none-any.whl → 2.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pyxecm might be problematic.

Files changed (78)
  1. pyxecm/__init__.py +7 -4
  2. pyxecm/avts.py +727 -254
  3. pyxecm/coreshare.py +686 -467
  4. pyxecm/customizer/__init__.py +16 -4
  5. pyxecm/customizer/__main__.py +58 -0
  6. pyxecm/customizer/api/__init__.py +5 -0
  7. pyxecm/customizer/api/__main__.py +6 -0
  8. pyxecm/customizer/api/app.py +163 -0
  9. pyxecm/customizer/api/auth/__init__.py +1 -0
  10. pyxecm/customizer/api/auth/functions.py +92 -0
  11. pyxecm/customizer/api/auth/models.py +13 -0
  12. pyxecm/customizer/api/auth/router.py +78 -0
  13. pyxecm/customizer/api/common/__init__.py +1 -0
  14. pyxecm/customizer/api/common/functions.py +47 -0
  15. pyxecm/customizer/api/common/metrics.py +92 -0
  16. pyxecm/customizer/api/common/models.py +21 -0
  17. pyxecm/customizer/api/common/payload_list.py +870 -0
  18. pyxecm/customizer/api/common/router.py +72 -0
  19. pyxecm/customizer/api/settings.py +128 -0
  20. pyxecm/customizer/api/terminal/__init__.py +1 -0
  21. pyxecm/customizer/api/terminal/router.py +87 -0
  22. pyxecm/customizer/api/v1_csai/__init__.py +1 -0
  23. pyxecm/customizer/api/v1_csai/router.py +87 -0
  24. pyxecm/customizer/api/v1_maintenance/__init__.py +1 -0
  25. pyxecm/customizer/api/v1_maintenance/functions.py +100 -0
  26. pyxecm/customizer/api/v1_maintenance/models.py +12 -0
  27. pyxecm/customizer/api/v1_maintenance/router.py +76 -0
  28. pyxecm/customizer/api/v1_otcs/__init__.py +1 -0
  29. pyxecm/customizer/api/v1_otcs/functions.py +61 -0
  30. pyxecm/customizer/api/v1_otcs/router.py +179 -0
  31. pyxecm/customizer/api/v1_payload/__init__.py +1 -0
  32. pyxecm/customizer/api/v1_payload/functions.py +179 -0
  33. pyxecm/customizer/api/v1_payload/models.py +51 -0
  34. pyxecm/customizer/api/v1_payload/router.py +499 -0
  35. pyxecm/customizer/browser_automation.py +721 -286
  36. pyxecm/customizer/customizer.py +1076 -1425
  37. pyxecm/customizer/exceptions.py +35 -0
  38. pyxecm/customizer/guidewire.py +1186 -0
  39. pyxecm/customizer/k8s.py +901 -379
  40. pyxecm/customizer/log.py +107 -0
  41. pyxecm/customizer/m365.py +2967 -920
  42. pyxecm/customizer/nhc.py +1169 -0
  43. pyxecm/customizer/openapi.py +258 -0
  44. pyxecm/customizer/payload.py +18228 -7820
  45. pyxecm/customizer/pht.py +717 -286
  46. pyxecm/customizer/salesforce.py +516 -342
  47. pyxecm/customizer/sap.py +58 -41
  48. pyxecm/customizer/servicenow.py +611 -372
  49. pyxecm/customizer/settings.py +445 -0
  50. pyxecm/customizer/successfactors.py +408 -346
  51. pyxecm/customizer/translate.py +83 -48
  52. pyxecm/helper/__init__.py +5 -2
  53. pyxecm/helper/assoc.py +83 -43
  54. pyxecm/helper/data.py +2406 -870
  55. pyxecm/helper/logadapter.py +27 -0
  56. pyxecm/helper/web.py +229 -101
  57. pyxecm/helper/xml.py +596 -171
  58. pyxecm/maintenance_page/__init__.py +5 -0
  59. pyxecm/maintenance_page/__main__.py +6 -0
  60. pyxecm/maintenance_page/app.py +51 -0
  61. pyxecm/maintenance_page/settings.py +28 -0
  62. pyxecm/maintenance_page/static/favicon.avif +0 -0
  63. pyxecm/maintenance_page/templates/maintenance.html +165 -0
  64. pyxecm/otac.py +235 -141
  65. pyxecm/otawp.py +2668 -1220
  66. pyxecm/otca.py +569 -0
  67. pyxecm/otcs.py +7956 -3237
  68. pyxecm/otds.py +2178 -925
  69. pyxecm/otiv.py +36 -21
  70. pyxecm/otmm.py +1272 -325
  71. pyxecm/otpd.py +231 -127
  72. pyxecm-2.0.1.dist-info/METADATA +122 -0
  73. pyxecm-2.0.1.dist-info/RECORD +76 -0
  74. {pyxecm-1.6.dist-info → pyxecm-2.0.1.dist-info}/WHEEL +1 -1
  75. pyxecm-1.6.dist-info/METADATA +0 -53
  76. pyxecm-1.6.dist-info/RECORD +0 -32
  77. {pyxecm-1.6.dist-info → pyxecm-2.0.1.dist-info/licenses}/LICENSE +0 -0
  78. {pyxecm-1.6.dist-info → pyxecm-2.0.1.dist-info}/top_level.txt +0 -0
pyxecm/customizer/nhc.py (added)
@@ -0,0 +1,1169 @@
1
+ """NHC stands for National Hurricane Center.
2
+
3
+ It is a comprehensive data source for tropical storms around the US (Atlantic and Pacific basins).
4
+
5
+ See: https://www.nhc.noaa.gov
6
+ """
7
+
8
+ from __future__ import annotations  # defer annotation evaluation - tropycal/pandas types used below are optional imports
+
+ __author__ = "Dr. Marc Diefenbruch"
9
+ __copyright__ = "Copyright (C) 2024-2025, OpenText"
10
+ __credits__ = ["Kai-Philip Gatzweiler"]
11
+ __maintainer__ = "Dr. Marc Diefenbruch"
12
+ __email__ = "mdiefenb@opentext.com"
13
+
14
+ import logging
15
+ import multiprocessing
16
+ import os
17
+ import tempfile
18
+ import threading
19
+ import time
20
+
21
+ from pyxecm.helper import Data
22
+
23
+ default_logger = logging.getLogger("pyxecm.customizer.nhc")
24
+
25
+ try:
26
+ import pandas as pd
27
+
28
+ pandas_installed = True
29
+ except ModuleNotFoundError:
30
+ default_logger.warning(
31
+ "Module pandas is not installed. Customizer will not support bulk workspace creation.",
32
+ )
33
+ pandas_installed = False
34
+
35
+ try:
36
+ import matplotlib as mpl
37
+ from tropycal import rain, tracks
38
+
39
+ mpl.use("Agg")
40
+ tropycal_installed = True
41
+ except ModuleNotFoundError:
42
+ default_logger.warning(
43
+ "Module tropycal is not installed. Customizer will not support NHC storm data source.",
44
+ )
45
+ tropycal_installed = False
46
+
47
+ STORM_IMAGE_BASE_PATH = "nhc/images/"
48
+ STORM_DATA_BASE_PATH = "nhc/data/"
49
+ STORM_IMAGE_PLOT_MAX_RETRY = 7
50
+
51
+ STORM_NUMBERS = {
52
+ "01": "ONE",
53
+ "02": "TWO",
54
+ "03": "THREE",
55
+ "04": "FOUR",
56
+ "05": "FIVE",
57
+ "06": "SIX",
58
+ "07": "SEVEN",
59
+ "08": "EIGHT",
60
+ "09": "NINE",
61
+ "10": "TEN",
62
+ "11": "ELEVEN",
63
+ "12": "TWELVE",
64
+ "13": "THIRTEEN",
65
+ "14": "FOURTEEN",
66
+ "15": "FIFTEEN",
67
+ "16": "SIXTEEN",
68
+ "17": "SEVENTEEN",
69
+ "18": "EIGHTEEN",
70
+ "19": "NINETEEN",
71
+ "20": "TWENTY",
72
+ "21": "TWENTY-ONE",
73
+ "22": "TWENTY-TWO",
74
+ "23": "TWENTY-THREE",
75
+ "24": "TWENTY-FOUR",
76
+ "25": "TWENTY-FIVE",
77
+ "26": "TWENTY-SIX",
78
+ "27": "TWENTY-SEVEN",
79
+ "28": "TWENTY-EIGHT",
80
+ "29": "TWENTY-NINE",
81
+ "30": "THIRTY",
82
+ "31": "THIRTY-ONE",
83
+ "32": "THIRTY-TWO",
84
+ "33": "THIRTY-THREE",
85
+ "34": "THIRTY-FOUR",
86
+ "35": "THIRTY-FIVE",
87
+ "36": "THIRTY-SIX",
88
+ "37": "THIRTY-SEVEN",
89
+ "38": "THIRTY-EIGHT",
90
+ "39": "THIRTY-NINE",
91
+ "40": "FORTY",
92
+ "41": "FORTY-ONE",
93
+ "42": "FORTY-TWO",
94
+ "43": "FORTY-THREE",
95
+ "44": "FORTY-FOUR",
96
+ "45": "FORTY-FIVE",
97
+ "46": "FORTY-SIX",
98
+ "47": "FORTY-SEVEN",
99
+ "48": "FORTY-Eight",
100
+ "49": "FORTY-Nine",
101
+ "50": "FIFTY",
102
+ "51": "FIFTY-ONE",
103
+ "52": "FIFTY-TWO",
104
+ "53": "FIFTY-THREE",
105
+ "54": "FIFTY-FOUR",
106
+ "55": "FIFTY-FIVE",
107
+ "56": "FIFTY-SIX",
108
+ "57": "FIFTY-SEVEN",
109
+ "58": "FIFTY-EIGHT",
110
+ "59": "FIFTY-NINE",
111
+ "60": "SIXTY",
112
+ "61": "SIXTY-ONE",
113
+ "62": "SIXTY-TWO",
114
+ "63": "SIXTY-THREE",
115
+ "64": "SIXTY-FOUR",
116
+ "65": "SIXTY-FIVE",
117
+ "66": "SIXTY-SIX",
118
+ "67": "SIXTY-SEVEN",
119
+ "68": "SIXTY-EIGHT",
120
+ "69": "SIXTY-NINE",
121
+ "70": "SEVENTY",
122
+ "71": "SEVENTY-ONE",
123
+ "72": "SEVENTY-TWO",
124
+ "73": "SEVENTY-THREE",
125
+ "74": "SEVENTY-FOUR",
126
+ "75": "SEVENTY-FIVE",
127
+ "76": "SEVENTY-SIX",
128
+ "77": "SEVENTY-SEVEN",
129
+ "78": "SEVENTY-EIGHT",
130
+ "79": "SEVENTY-NINE",
131
+ "80": "EIGHTY",
132
+ "81": "EIGHTY-ONE",
133
+ "82": "EIGHTY-TWO",
134
+ "83": "EIGHTY-THREE",
135
+ "84": "EIGHTY-FOUR",
136
+ "85": "EIGHTY-FIVE",
137
+ "86": "EIGHTY-SIX",
138
+ "87": "EIGHTY-SEVEN",
139
+ "88": "EIGHTY-EIGHT",
140
+ "89": "EIGHTY-NINE",
141
+ "90": "NINETY",
142
+ "91": "NINETY-ONE",
143
+ "92": "NINETY-TWO",
144
+ "93": "NINETY-THREE",
145
+ "94": "NINETY-FOUR",
146
+ "95": "NINETY-FIVE",
147
+ "96": "NINETY-SIX",
148
+ "97": "NINETY-SEVEN",
149
+ "98": "NINETY-EIGHT",
150
+ "99": "NINETY-NINE",
151
+ }
152
+
153
+
154
+ class NHC:
155
+ """Class NHC is used to retrieve data from National Hurricane Center."""
156
+
157
+ logger: logging.Logger = default_logger
158
+
159
+ _basin: str | None = None
160
+ _basin_data = None # don't use tropycal specific data types here - this clashes if the module is not installed!
161
+ _rain_data = None  # tropycal rain.RainDataset - same caveat as above applies
162
+ # currently the rain data source seems to only go from ... to 2020
163
+ _rain_min_year: int | None = None
164
+ _rain_max_year: int | None = None
165
+ _session = None
166
+
167
+ _download_dir_images: str
168
+ _download_dir_data: str
169
+
170
+ _storm_plot_exclusions = None
171
+
172
+ def __init__(
173
+ self,
174
+ basin: str = "both",
175
+ source: str = "hurdat",
176
+ load_rain_data: bool = True,
177
+ include_btk: bool = True,
178
+ storm_plot_exclusions: list | None = None,
179
+ download_dir_images: str = STORM_IMAGE_BASE_PATH,
180
+ download_dir_data: str = STORM_DATA_BASE_PATH,
181
+ logger: logging.Logger = default_logger,
182
+ ) -> None:
183
+ """Initialize the NHC object.
184
+
185
+ Args:
186
+ basin (str, optional):
187
+ The name of the basin. Possible values:
188
+ - "north_atlantic" (using HURDAT2 and IBTrACS data source)
189
+ - "east_pacific" (using HURDAT2 and IBTrACS data source)
190
+ - "both" ("north_atlantic" & "east_pacific" combined)
191
+ - "west_pacific" (using IBTrACS data source)
192
+ - "north_indian" (using IBTrACS data source)
193
+ - "south_indian" (using IBTrACS data source)
194
+ - "australia" (using IBTrACS* : special case)
195
+ - "south_pacific" (using IBTrACS)
196
+ - "south_atlantic" (using IBTrACS)
197
+ - "all" (suing all IBTrACS)
198
+ source (str, optional):
199
+ Data source to read in. Default is HURDAT2.
200
+ Possible values:
201
+ "hurdat" - HURDAT2 data source for the North Atlantic and East/Central Pacific basins
202
+ "ibtracs" - ibtracs data source for regional or global data
203
+ load_rain_data (bool, optional):
204
+ Controls whether or not the rain data is loaded as well.
205
+ This uses a Pandas data frame.
206
+ include_btk (bool, optional):
207
+ If True, the best track data from NHC for the most recent years where it doesn't
208
+ exist in HURDAT2 will be added into the dataset. Valid for "north_atlantic" and
209
+ "east_pacific" basins. Default is True.
210
+ storm_plot_exclusions (list | None, optional):
211
+ An optional list of storms to exclude from plotting. Defaults to None.
212
+ Use the storm codes like "AL022018" as values for the exclusion list.
213
+ download_dir_images (str, optional):
214
+ Where to store the downloaded storm images. It can be a relative or absolute path.
215
+ If it is a relative path the default tmp path of the operating system will be used
216
+ as a prefix.
217
+ download_dir_data (str, optional):
218
+ Where to store the downloaded storm data files. It can be a relative or absolute path.
219
+ If it is a relative path the default tmp path of the operating system will be used
220
+ as a prefix.
221
+ logger (logging.Logger, optional):
222
+ The logging object to use for all log messages. Defaults to default_logger.
223
+
224
+ """
225
+
226
+ if logger != default_logger:
227
+ self.logger = logger.getChild("nhc")
228
+ for logfilter in logger.filters:
229
+ self.logger.addFilter(logfilter)
230
+
231
+ # Initialize the data sources, download locations and parameters:
232
+
233
+ if basin:
234
+ # Load the storm basin dataset
235
+ self._basin_data = tracks.TrackDataset(basin=basin, source=source, include_btk=include_btk)
236
+ self._basin = basin
237
+
238
+ # rain data from Weather Prediction Center (WPC) data source
239
+ if load_rain_data:
240
+ self._rain_data = rain.RainDataset()
241
+ if self._rain_data:
242
+ self._rain_min_year = int(self._rain_data.rain_df["Year"].min())
243
+ self._rain_max_year = int(self._rain_data.rain_df["Year"].max())
244
+ else:
245
+ self._rain_data = None
246
+
247
+ self._data = Data(logger=self.logger)
248
+
249
+ self._download_dir_images = download_dir_images
250
+ self._download_dir_data = download_dir_data
251
+
252
+ self._storm_plot_exclusions = storm_plot_exclusions
253
+
254
+ # end method definition
255
+
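For reference, a minimal usage sketch of this constructor. It assumes tropycal and pandas are installed; the exclusion code "AL022018" is only an illustrative value:

    from pyxecm.customizer.nhc import NHC

    # Load the combined North Atlantic / East Pacific HURDAT2 dataset,
    # including recent best-track (btk) data and the WPC rain data:
    nhc = NHC(
        basin="both",
        source="hurdat",
        load_rain_data=True,
        include_btk=True,
        storm_plot_exclusions=["AL022018"],  # illustrative storm code
    )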
256
+ def get_data(self) -> Data:
257
+ """Get the Data object that holds all processed PHT products.
258
+
259
+ Returns:
260
+ Data:
261
+ Data structure with all processed NHC storm data.
262
+
263
+ """
264
+
265
+ return self._data
266
+
267
+ # end method definition
268
+
269
+ def get_basin_data(self) -> tracks.TrackDataset:
270
+ """Return the tracks data set.
271
+
272
+ Returns:
273
+ tracks.TrackDataset:
274
+ The track data set for the basin(s).
275
+
276
+ """
277
+
278
+ return self._basin_data
279
+
280
+ # end method definition
281
+
282
+ def get_rain_data(self) -> pd.DataFrame | None:
283
+ """Get the complete rainfall data.
284
+
285
+ Returns:
286
+ pd.DataFrame:
287
+ Rain data from Weather Prediction Center (WPC) data source.
288
+ None in case there's no data.
289
+
290
+ """
291
+
292
+ if not self._rain_data:
293
+ self._rain_data = rain.RainDataset()
294
+
295
+ return self._rain_data.rain_df
296
+
297
+ # end method definition
298
+
299
+ def get_season(self, year: int, basin: str = "both", source: str = "hurdat") -> tracks.Season | None:
300
+ """Get data on a storm season (all stroms in a particular year).
301
+
302
+ See: https://tropycal.github.io/tropycal/api/generated/tropycal.tracks.TrackDataset.html
303
+
304
+ Args:
305
+ year (int):
306
+ The year of the storm, e.g. 2005.
307
+ basin (str, optional):
308
+ The basin of the storm, values can be:
309
+ * "north_atlantic" (using HURDAT2 and IBTrACS data source)
310
+ * "east_pacific" (using HURDAT2 and IBTrACS data source)
311
+ * "both" ("north_atlantic" & "east_pacific" combined)
312
+ * "west_pacific" (using IBTrACS data source)
313
+ * "north_indian" (using IBTrACS data source)
314
+ * "south_indian" (using IBTrACS data source)
315
+ * "australia" (using IBTrACS* : special case)
316
+ * "south_pacific" (using IBTrACS)
317
+ * "south_atlantic" (using IBTrACS)
318
+ * "all" (suing all IBTrACS)
319
+ source (str, optional):
320
+ Data source to read in. Default is HURDAT2.
321
+ Possible values:
322
+ "hurdat" - HURDAT2 data source for the North Atlantic and East/Central Pacific basins
323
+ "ibtracs" - ibtracs data source for regional or global data
324
+
325
+ Returns:
326
+ tracks.Season:
327
+ Season data object or None in case of an error.
328
+
329
+ """
330
+
331
+ # Load the storm basin dataset
332
+ if not self._basin_data:
333
+ self._basin_data = tracks.TrackDataset(basin=basin, source=source)
334
+
335
+ # Get storm data by name and year
336
+ try:
337
+ season_data = self._basin_data.get_season(year=year)
338
+ except ValueError as e:
339
+ self.logger.info(
340
+ "Cannot find season data for year -> %s!; error -> %s",
341
+ str(year),
342
+ str(e),
343
+ )
344
+ return None
345
+
346
+ return season_data
347
+
348
+ # end method definition
349
+
350
+ def get_storm(
351
+ self,
352
+ name: str | None = None,
353
+ year: int | None = None,
354
+ storm_id: str | None = None,
355
+ basin: str = "both",
356
+ ) -> tracks.Storm | None:
357
+ """Get data on a particular storm.
358
+
359
+ Args:
360
+ name (str):
361
+ The nickname of the storm, like 'Katrina'.
362
+ year (int):
363
+ The year of the storm, e.g. 2005.
364
+ storm_id (str):
365
+ As an alternative to name and year, you can provide the ID of the storm.
366
+ basin (str):
367
+ The basin of the storm, values can be:
368
+ * "north_atlantic" (using HURDAT2 and IBTrACS data source)
369
+ * "east_pacific" (using HURDAT2 and IBTrACS data source)
370
+ * "both" ("north_atlantic" & "east_pacific" combined)
371
+ * "west_pacific" (using IBTrACS data source)
372
+ * "north_indian" (using IBTrACS data source)
373
+ * "south_indian" (using IBTrACS data source)
374
+ * "australia" (using IBTrACS* : special case)
375
+ * "south_pacific" (using IBTrACS)
376
+ * "south_atlantic" (using IBTrACS)
377
+ * "all" (suing all IBTrACS)
378
+
379
+ Returns:
380
+ tracks.Storm | None:
381
+ The storm data or None in case of an error.
382
+
383
+ """
384
+
385
+ # Load the storm basin dataset
386
+ if not self._basin_data:
387
+ self._basin_data = tracks.TrackDataset(basin=basin)
388
+
389
+ # Get storm data by name and year
390
+ try:
391
+ storm_data = (
392
+ self._basin_data.get_storm(storm=storm_id)
393
+ if storm_id
394
+ else self._basin_data.get_storm(storm=(name, year))
395
+ )
396
+ except (ValueError, KeyError) as e:
397
+ self.logger.info(
398
+ "Cannot find storm data for storm -> '%s'%s; error -> %s",
399
+ name if name else storm_id,
400
+ " and year -> {}".format(str(year)) if year else "",
401
+ str(e),
402
+ )
403
+ return None
404
+
405
+ return storm_data
406
+
407
+ # end method definition
408
+
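A short sketch of the two lookup methods above, continuing with the "nhc" object from the constructor sketch (name, year and storm ID are illustrative; both methods return None if the lookup fails):

    # All storms of one season (returns a tropycal tracks.Season or None):
    season = nhc.get_season(year=2005)

    # A single storm, by name and year ...
    storm = nhc.get_storm(name="Katrina", year=2005)
    # ... or directly by its storm ID:
    storm = nhc.get_storm(storm_id="AL122005")
    if storm is not None:
        print(storm.to_dict()["name"])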
409
+ def get_storm_rainfall(self, storm_data: tracks.Storm) -> pd.DataFrame | None:
410
+ """Get the rainfall data of a given storm.
411
+
412
+ Args:
413
+ storm_data (tracks.Storm): Storm data. This needs to be retrieved
414
+ with get_storm() before.
415
+
416
+ Returns:
417
+ pd.DataFrame | None: Pandas data frame with the storm rain data, or None if no rain data is found.
418
+
419
+ """
420
+
421
+ if not self._rain_data:
422
+ self._rain_data = rain.RainDataset()
423
+
424
+ try:
425
+ storm_rain = self._rain_data.get_storm_rainfall(storm_data)
426
+ except RuntimeError as re:
427
+ self.logger.info(
428
+ "Cannot find rain data for storm -> '%s' in year -> %s; message -> %s",
429
+ storm_data["name"],
430
+ str(storm_data["season"]),
431
+ str(re),
432
+ )
433
+ return None
434
+
435
+ return storm_rain
436
+
437
+ # end method definition
438
+
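Continuing the sketch, rainfall observations for a storm come back as a Pandas data frame (the rain data source only reaches up to about 2020, as noted above):

    rain_df = nhc.get_storm_rainfall(storm_data=storm)
    if rain_df is not None:
        print(rain_df.head())  # inspect the WPC rainfall observations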
439
+ def get_storm_image_path(self) -> str:
440
+ """Get the path to the filesystem directory where storm plot images should be saved.
441
+
442
+ Returns:
443
+ str:
444
+ The path to the filesystem directory where storm plot images should be saved.
445
+
446
+ """
447
+
448
+ return os.path.join(tempfile.gettempdir(), self._download_dir_images)
449
+
450
+ # end method definition
451
+
452
+ def get_storm_data_path(self) -> str:
453
+ """Get the path to the filesystem directory where storm data should be saved.
454
+
455
+ Returns:
456
+ str:
457
+ The path to the filesystem directory where storm data should be saved.
458
+
459
+ """
460
+
461
+ return os.path.join(tempfile.gettempdir(), self._download_dir_data)
462
+
463
+ # end method definition
464
+
465
+ def get_storm_file_name(self, storm_id: str, file_type: str, suffix: str = "") -> str:
466
+ """Determine the save path and filename of the plot image or data files of a given storm.
467
+
468
+ Args:
469
+ storm_id (str):
470
+ The ID of the storm.
471
+ file_type (str):
472
+ The file type. Can be "svg", "png", "json", ...
473
+ suffix (str, optional):
474
+ For special image files (like rain) we want to add a special name suffix.
475
+
476
+ """
477
+
478
+ file_name = storm_id
479
+ # Add a suffix for special cases:
480
+ if suffix:
481
+ file_name += suffix
482
+ file_name += "." + file_type
483
+
484
+ return file_name
485
+
486
+ # end method definition
487
+
488
+ def save_storm_track_image(
489
+ self,
490
+ storm_data: tracks.Storm,
491
+ image_path: str,
492
+ domain: str | dict = "dynamic",
493
+ ) -> tuple[bool, str]:
494
+ """Save an image (map) file of a given storm track.
495
+
496
+ Args:
497
+ storm_data (tracks.Storm):
498
+ The storm data.
499
+ image_path (str):
500
+ Where to store the image file. If the directory
501
+ does not exist it is created.
502
+ domain (str | dict, optional):
503
+ Zoom area in geo coordinates. Defaults to "dynamic".
504
+
505
+ Returns:
506
+ bool:
507
+ True = success
508
+ False = error (or at least issues).
509
+ str:
510
+ Error / warning message.
511
+
512
+ """
513
+
514
+ retries = 0
515
+
516
+ # Loop for retries:
517
+ while True:
518
+ try:
519
+ storm_data.plot(
520
+ domain=domain,
521
+ save_path=image_path,
522
+ )
523
+ except Exception as plot_error:
524
+ if retries > STORM_IMAGE_PLOT_MAX_RETRY:
525
+ return (False, str(plot_error))
526
+ retries += 1
527
+ else:
528
+ return (True, "Success" if retries == 0 else "Success after {} retries".format(retries))
529
+
530
+ # end method definition
531
+
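A sketch of how the path helpers and this plot method could work together ("png" and the storm ID are illustrative values):

    import os

    image_dir = nhc.get_storm_image_path()
    os.makedirs(image_dir, exist_ok=True)
    image_file = nhc.get_storm_file_name(storm_id="AL122005", file_type="png")

    success, message = nhc.save_storm_track_image(
        storm_data=storm,
        image_path=os.path.join(image_dir, image_file),
    )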
532
+ def save_storm_track_data(
533
+ self,
534
+ storm_data: tracks.Storm,
535
+ data_path: str,
536
+ save_general_storm_attributes: bool = False,
537
+ ) -> None:
538
+ """Save a data file of a given storm track (this is a data series over time).
539
+
540
+ Args:
541
+ storm_data (tracks.Storm):
542
+ The storm object (retrieved by get_storm() before).
543
+ data_path (str):
544
+ Where to store the data file.
545
+ save_general_storm_attributes (bool, optional):
546
+ Whether to include (repeatedly, per row) the
547
+ general storm attributes in the data set.
548
+
549
+ """
550
+
551
+ data = Data(
552
+ storm_data.to_dataframe(attrs_as_columns=save_general_storm_attributes),
553
+ logger=self.logger,
554
+ )
555
+ if not data:
556
+ return
557
+ if ("basin" not in data.get_columns() or []) and ("wmo_basin" in data.get_columns() or []):
558
+ data.rename_column(old_column_name="wmo_basin", new_column_name="basin")
559
+ data.drop_columns(
560
+ column_names=["extra_obs", "special", "operational_id", "wmo_basin"],
561
+ )
562
+ data.rename_column(old_column_name="lat", new_column_name="latitude")
563
+ data.rename_column(old_column_name="lon", new_column_name="longitude")
564
+ if data_path.endswith("json"):
565
+ result = data.save_json_data(json_path=data_path)
566
+ elif data_path.endswith("xlsx"):
567
+ result = data.save_excel_data(excel_path=data_path)
568
+ else:
569
+ self.logger.error("Illegal file type for storm track data!")
570
+ return
571
+
572
+ # We try to be nice to memory consumption:
573
+ del data
574
+
575
+ if result:
576
+ self.logger.info(
577
+ "Successfully saved track data of storm -> '%s' (%s) to file -> '%s'",
578
+ storm_data["name"],
579
+ storm_data["id"],
580
+ data_path,
581
+ )
582
+ else:
583
+ self.logger.error(
584
+ "Failed to save track data of storm -> '%s' (%s) to file -> '%s'",
585
+ storm_data["name"],
586
+ storm_data["id"],
587
+ data_path,
588
+ )
589
+
590
+ # end method definition
591
+
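The matching sketch for the track data export (JSON here; "xlsx" works the same way):

    data_dir = nhc.get_storm_data_path()
    os.makedirs(data_dir, exist_ok=True)
    data_file = nhc.get_storm_file_name(storm_id="AL122005", file_type="json")

    nhc.save_storm_track_data(
        storm_data=storm,
        data_path=os.path.join(data_dir, data_file),
    )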
592
+ def save_storm_rain_image(
593
+ self,
594
+ storm_data: tracks.Storm,
595
+ image_path: str,
596
+ domain: str | dict = "dynamic",
597
+ ) -> tuple[bool, str]:
598
+ """Save an image file for a given storm rainfall.
599
+
600
+ Args:
601
+ storm_data (tracks.Storm):
602
+ The storm object (retreived by get_storm() before).
603
+ image_path (str):
604
+ Where to store the image file. If the directory
605
+ does not exist it is created.
606
+ domain (str | dict, optional):
607
+ Zoom area in geo coordinates. Defaults to "dynamic".
608
+
609
+ Returns:
610
+ bool:
611
+ True = success
612
+ False = error (or at least issues).
613
+ str:
614
+ Error / warning message.
615
+
616
+ """
617
+
618
+ retries = 0
619
+
620
+ # Loop for retries:
621
+ while True:
622
+ try:
623
+ # Interpolate to grid
624
+ grid = self._rain_data.interpolate_to_grid(storm_data, return_xarray=True)
625
+ levels = [1, 2, 4, 8, 12, 16, 20, 30, 40, 50, 60]
626
+ self._rain_data.plot_rain_grid(
627
+ storm_data,
628
+ grid,
629
+ levels,
630
+ domain=domain,
631
+ save_path=image_path,
632
+ )
633
+ except Exception as plot_error:
634
+ if retries > STORM_IMAGE_PLOT_MAX_RETRY:
635
+ return (False, str(plot_error))
636
+ retries += 1
637
+ else:
638
+ return (True, "Success" if retries == 0 else "Success after {} retries".format(retries))
639
+
640
+ # end method definition
641
+
642
+ def save_storm_rain_data(
643
+ self,
644
+ storm_data: tracks.Storm,
645
+ data_path: str,
646
+ ) -> None:
647
+ """Save a data file for a given storm rainfall.
648
+
649
+ Args:
650
+ storm_data (tracks.Storm):
651
+ The storm object (retrieved by get_storm() before).
652
+ data_path (str):
653
+ Where to store the data file.
654
+
655
+ """
656
+
657
+ data = Data(self._rain_data.get_storm_rainfall(storm_data), logger=self.logger)
658
+ if not data:
659
+ return
660
+ if data_path.endswith("json"):
661
+ result = data.save_json_data(json_path=data_path)
662
+ elif data_path.endswith("xlsx"):
663
+ result = data.save_excel_data(excel_path=data_path)
664
+ else:
665
+ self.logger.error("Illegal file type!")
666
+ return
667
+
668
+ if result:
669
+ self.logger.info(
670
+ "Successfully saved rainfall data of storm -> '%s' (%s) to file -> '%s'",
671
+ storm_data["name"],
672
+ storm_data["id"],
673
+ data_path,
674
+ )
675
+ else:
676
+ self.logger.error(
677
+ "Failed to save rainfall data of storm -> '%s' (%s) to file -> '%s'",
678
+ storm_data["name"],
679
+ storm_data["id"],
680
+ data_path,
681
+ )
682
+
683
+ # end method definition
684
+
685
+ def load_storms(
686
+ self,
687
+ year_start: int,
688
+ year_end: int,
689
+ save_track_images: list | None = None,
690
+ save_track_data: list | None = None,
691
+ save_rain_images: list | None = None,
692
+ save_rain_data: list | None = None,
693
+ skip_existing_files: bool = True,
694
+ load_async: bool = True,
695
+ async_processes: int = 8,
696
+ ) -> bool:
697
+ """Load storm into a data frame and save files for storm tracks and rainfall (data and image files).
698
+
699
+ Args:
700
+ year_start (int):
701
+ The start year (season).
702
+ year_end (int):
703
+ The end year (season).
704
+ save_track_images (list, optional):
705
+ A list of image types, e.g. ["svg", "png"], to save the storm track.
706
+ save_track_data (list, optional):
707
+ A list of data types, e.g. ["json", "xlsx"], to save the storm track.
708
+ save_rain_images (list, optional):
709
+ A list of image types, e.g. ["svg", "png"], to save the storm rainfall.
710
+ save_rain_data (list, optional):
711
+ A list of data types, e.g. ["json", "xlsx"], to save the storm rainfall.
712
+ skip_existing_files (bool, optional):
713
+ Skip files that have been saved before.
714
+ load_async (bool, optional):
715
+ Whether or not we want the plot method to run asynchronously. Default
716
+ is True. In case of issues or deadlocks you may want to set it to False.
717
+ async_processes (int, optional):
718
+ Number of async processes to generate the plot files.
719
+ Default is 8.
720
+
721
+ """
722
+
723
+ data = self.get_data()
724
+ image_dir = self.get_storm_image_path()
725
+ # Create folder if it does not exist
726
+ if not os.path.exists(image_dir):
727
+ os.makedirs(image_dir)
728
+ data_dir = self.get_storm_data_path()
729
+ # Create folder if it does not exist
730
+ if not os.path.exists(data_dir):
731
+ os.makedirs(data_dir)
732
+
733
+ self.logger.info(
734
+ "Loading data from National Hurricane Center from year -> %s to year -> %s for basin -> '%s'",
735
+ year_start,
736
+ year_end,
737
+ self._basin,
738
+ )
739
+
740
+ if save_track_images:
741
+ self.logger.info("Generate track plot files -> %s", str(save_track_images))
742
+ if save_track_data:
743
+ self.logger.info("Generate track data files -> %s", str(save_track_data))
744
+ if save_rain_images:
745
+ self.logger.info("Generate rain plot files -> %s", str(save_rain_images))
746
+ if save_rain_data:
747
+ self.logger.info("Generate rain data files -> %s", str(save_rain_data))
748
+ self.logger.info("Existing plot files will %sbe reused.", "" if skip_existing_files else "not ")
749
+
750
+ if load_async:
751
+ self.logger.info("Initiate plot storm worker pool of size -> %d (asynchronous)...", async_processes)
752
+ else:
753
+ self.logger.info("Initiate plot storm worker pool of size -> %d (synchronous)...", async_processes)
754
+
755
+ # Create the pool with a given number of processes.
756
+ # maxtasksperchild=1 makes sure that the processes really
757
+ # terminate after the called method completes. We want
758
+ # to do this to really free up the memory as the plot()
759
+ # methods tend to "memory leak" and pods will be evicted
760
+ # over time.
761
+ pool = multiprocessing.Pool(processes=async_processes, maxtasksperchild=1)
762
+ # Collect the results of the processes in this list:
763
+ results = []
764
+
765
+ if load_async:
766
+ done_event = threading.Event()
767
+
768
+ # Start the result collector thread before submitting plot tasks.
769
+ # This thread is used for printing success or failure messages.
770
+ # We don't want to do this in the actual plot files as this
771
+ # causes issues (deadlock) if printing log messages in the process
772
+ # worker methods.
773
+ collector_thread = threading.Thread(
774
+ name="NHC Result Collector",
775
+ target=self.result_collector,
776
+ args=(results, done_event),
777
+ )
778
+ self.logger.info("Start collector thread for logging plot process results...")
779
+ collector_thread.start()
780
+
781
+ for year in range(year_start, year_end + 1):
782
+ season = self.get_season(year=year)
783
+ data.append(season.to_dataframe())
784
+ for storm_id, storm_value in season.dict.items():
785
+ storm_name = storm_value["name"]
786
+ storm_operational_id = storm_value.get("operational_id", storm_id)
787
+ if storm_operational_id and storm_operational_id != storm_id:
788
+ self.logger.info(
789
+ "Storm '%s' has an operational ID -> '%s' which is different from the storm ID -> '%s",
790
+ storm_name,
791
+ storm_operational_id,
792
+ storm_id,
793
+ )
794
+ storm_data = self.get_storm(storm_id=storm_id)
795
+ if not storm_data:
796
+ self.logger.debug("Cannot get storm data form storm -> '%s' (%s)", storm_name, storm_id)
797
+ return False
798
+ storm_dict = storm_data.to_dict()
799
+ if storm_name == "UNNAMED":
800
+ # Tropycal (based on HURDAT) has unnamed storms with name "UNNAMED"
801
+ # while most other data sources have them as English number names
802
+ # like "Two" or "Eight". For this reason we change the name of unnamed
803
+ # storms here as well and write it back into the data frame and
804
+ # the storm data structure:
805
+ storm_name = STORM_NUMBERS[storm_id[2:4]]
806
+ storm_data["name"] = storm_name
807
+ data.set_value(
808
+ column="name",
809
+ value=storm_name,
810
+ condition=(data["id"] == storm_id), # this is a boolean pd.Series
811
+ )
812
+ self.logger.info(
813
+ "Processing storm -> '%s' (%s) in year -> %s...",
814
+ storm_name,
815
+ storm_id,
816
+ year,
817
+ )
818
+ # The category ("type") of the storm is a time series as the storm has
819
+ # a different category over time. We want the peak category:
820
+ type_series = storm_dict.get("type", None)
821
+ if type_series:
822
+ peak_index = storm_data.dict["vmax"].index(
823
+ max(storm_data.dict["vmax"]),
824
+ )
825
+ storm_type = type_series[peak_index]
826
+
827
+ # Add storm type as this is not included in season.to_dataframe():
828
+ if "type" not in data.get_columns():
829
+ data.add_column(new_column="type", data_type="string")
830
+ data.set_value(
831
+ column="type",
832
+ value=storm_type,
833
+ condition=(data["id"] == storm_id), # this is a boolean pd.Series
834
+ )
835
+
836
+ # Add year as this is not included in season.to_dataframe():
837
+ if "year" not in data.get_columns():
838
+ data.add_column(new_column="year", data_type="Int64")
839
+ data.set_value(
840
+ column="year",
841
+ value=year,
842
+ condition=(data["id"] == storm_id),
843
+ )
844
+ # Add basin as this is not included in season.to_dataframe():
845
+ if "basin" not in data.get_columns():
846
+ data.add_column(new_column="basin", data_type="string")
847
+ data.set_value(
848
+ column="basin",
849
+ value=storm_data["basin"],
850
+ condition=(data["id"] == storm_id), # boolean pd.Series
851
+ )
852
+ # Add counter (number of storm in season) as this is not included in season.to_dataframe():
853
+ if "counter" not in data.get_columns():
854
+ data.add_column(new_column="counter", data_type="string")
855
+ data.set_value(
856
+ column="counter",
857
+ value=storm_data["id"][2:4],
858
+ condition=(data["id"] == storm_id), # boolean pd.Series
859
+ )
860
+ # Add source info as this is not included in season.to_dataframe():
861
+ if "source_info" not in data.get_columns():
862
+ data.add_column(new_column="source_info", data_type="string")
863
+ data.set_value(
864
+ column="source_info",
865
+ value=storm_data["source_info"],
866
+ condition=(data["id"] == storm_id), # boolean pd.Series
867
+ )
868
+
869
+ if not self._storm_plot_exclusions or storm_id not in self._storm_plot_exclusions:
870
+ # Create the image files of the storm track:
871
+ for image_type in save_track_images or []:
872
+ image_file = self.get_storm_file_name(storm_id=storm_data["id"], file_type=image_type)
873
+ data.set_value(
874
+ column="image_file_" + image_type,
875
+ value=image_file,
876
+ condition=(data["id"] == storm_id), # boolean pd.Series
877
+ )
878
+ image_path = os.path.join(image_dir, image_file)
879
+ if skip_existing_files and os.path.exists(image_path):
880
+ self.logger.info(
881
+ "Storm track image file -> '%s' has been saved before - skipping...",
882
+ image_path,
883
+ )
884
+ continue
885
+ self.logger.info(
886
+ "Plot storm track image file -> '%s'%s...",
887
+ image_path,
888
+ " (replace existing file)" if os.path.exists(image_path) else "",
889
+ )
890
+ success_message = (
891
+ "Successfully saved track image for storm -> '{}' ({}) to file -> '{}'".format(
892
+ storm_name,
893
+ storm_data["id"],
894
+ image_path,
895
+ )
896
+ )
897
+ failure_message = (
898
+ "Issues while plotting track image for storm -> '{}' ({}) to file -> '{}'".format(
899
+ storm_name,
900
+ storm_data["id"],
901
+ image_path,
902
+ )
903
+ )
904
+ if load_async:
905
+ result = pool.apply_async(self.save_storm_track_image, args=(storm_data, image_path))
906
+ results.append(
907
+ (
908
+ result,
909
+ success_message,
910
+ failure_message,
911
+ "image_file_" + image_type,
912
+ storm_id,
913
+ image_path,
914
+ ),
915
+ )
916
+ else:
917
+ result = pool.apply(self.save_storm_track_image, args=(storm_data, image_path))
918
+ if result:
919
+ self.logger.info(success_message)
920
+ else:
921
+ self.logger.warning(failure_message)
922
+ continue
923
+ # end for image_type in save_track_images or []
924
+ # end if not self._storm_plot_exclusions or storm_id not in self._storm_plot_exclusions
925
+
926
+ # Create the data files of the storm track:
927
+ for data_type in save_track_data or []:
928
+ data_file = self.get_storm_file_name(storm_id=storm_data["id"], file_type=data_type)
929
+ data.set_value(
930
+ column="data_file_" + data_type,
931
+ value=data_file,
932
+ condition=(data["id"] == storm_id), # boolean pd.Series
933
+ )
934
+ data_path = os.path.join(data_dir, data_file)
935
+ if skip_existing_files and os.path.exists(data_path):
936
+ self.logger.info(
937
+ "Storm track data file -> '%s' has been saved before - skipping...",
938
+ data_path,
939
+ )
940
+ continue
941
+ self.save_storm_track_data(
942
+ storm_data=storm_data,
943
+ data_path=data_path,
944
+ )
945
+
946
+ # Sadly, rain data is only available up to year 2020.
947
+ if self._rain_min_year is None or year < self._rain_min_year or year > self._rain_max_year:
948
+ self.logger.debug(
949
+ "There's no rain data for year -> %s. Skipping rain plots for this year...",
950
+ year,
951
+ )
952
+ continue
953
+
954
+ storm_rain_data = self.get_storm_rainfall(storm_data=storm_data)
955
+ if storm_rain_data is None:
956
+ self.logger.debug(
957
+ "There's no rain data for storm -> '%s' in year -> %s. Skipping rain plots for this storm...",
958
+ storm_name,
959
+ year,
960
+ )
961
+ continue
962
+
963
+ if not self._storm_plot_exclusions or storm_id not in self._storm_plot_exclusions:
964
+ # Create the images of the storm rain:
965
+ for image_type in save_rain_images or []:
966
+ image_file_rain = self.get_storm_file_name(
967
+ storm_id=storm_data["id"],
968
+ file_type=image_type,
969
+ suffix="-rainfall",
970
+ )
971
+ data.set_value(
972
+ column="image_file_rain_" + image_type,
973
+ value=image_file_rain,
974
+ condition=(data["id"] == storm_id), # boolean pd.Series
975
+ )
976
+ image_path_rain = os.path.join(image_dir, image_file_rain)
977
+ if skip_existing_files and os.path.exists(image_path_rain):
978
+ self.logger.info(
979
+ "Storm rain image file -> '%s' has been saved before - skipping...",
980
+ image_path_rain,
981
+ )
982
+ continue
983
+ self.logger.info(
984
+ "Plot storm rain image file -> '%s'...",
985
+ image_path_rain,
986
+ )
987
+ success_message = "Successfully saved rain image for storm -> '{}' ({}) to file -> '{}'".format(
988
+ storm_data["name"],
989
+ storm_data["id"],
990
+ image_path_rain,
991
+ )
992
+ failure_message = (
993
+ "Issues while plotting rain image for storm -> '{}' ({}) to file -> '{}'".format(
994
+ storm_data["name"],
995
+ storm_data["id"],
996
+ image_path_rain,
997
+ )
998
+ )
999
+ if load_async:
1000
+ result = pool.apply_async(self.save_storm_rain_image, args=(storm_data, image_path_rain))
1001
+ results.append(
1002
+ (
1003
+ result,
1004
+ success_message,
1005
+ failure_message,
1006
+ "image_file_rain_" + image_type,
1007
+ storm_id,
1008
+ image_path_rain,
1009
+ ),
1010
+ )
1011
+ else:
1012
+ result = pool.apply(self.save_storm_rain_image, args=(storm_data, image_path_rain))
1013
+ if result:
1014
+ self.logger.info(success_message)
1015
+ else:
1016
+ self.logger.warning(failure_message)
1017
+ continue
1018
+ # end for image_type in save_rain_images or []
1019
+ # end if not self._storm_plot_exclusions or storm_id not in self._storm_plot_exclusions
1020
+
1021
+ # Create the data files of the storm rain:
1022
+ for data_type in save_rain_data or []:
1023
+ data_file_rain = self.get_storm_file_name(
1024
+ storm_id=storm_data["id"],
1025
+ file_type=data_type,
1026
+ suffix="-rainfall",
1027
+ )
1028
+ data.set_value(
1029
+ column="data_file_rain_" + data_type,
1030
+ value=data_file_rain,
1031
+ condition=(data["id"] == storm_id), # boolean pd.Series
1032
+ )
1033
+ data_path_rain = os.path.join(data_dir, data_file_rain)
1034
+ if skip_existing_files and os.path.exists(data_path_rain):
1035
+ self.logger.info(
1036
+ "Storm rain data file -> '%s' has been saved before - skipping...",
1037
+ data_path_rain,
1038
+ )
1039
+ continue
1040
+ self.save_storm_rain_data(
1041
+ storm_data=storm_data,
1042
+ data_path=data_path_rain,
1043
+ )
1044
+ # end for data_type in save_rain_data
1045
+ # end for storm_id, storm_value in season.dict.items()
1046
+ # end for year in range(year_start, year_end + 1)
1047
+
1048
+ # Add a column with the image directory and the data directory
1049
+ # (value is the same for all rows that's why we do it outside the loop):
1050
+ data.set_value(
1051
+ column="image_dir",
1052
+ value=image_dir,
1053
+ )
1054
+ data.set_value(
1055
+ column="data_dir",
1056
+ value=data_dir,
1057
+ )
1058
+
1059
+ self.logger.info("Close plot storm worker pool...")
1060
+ pool.close() # Close the pool to new tasks
1061
+ self.logger.info("Plot storm worker pool is closed.")
1062
+
1063
+ if load_async:
1064
+ self.logger.info("Send 'done' event to collector thread...")
1065
+ done_event.set()
1066
+
1067
+ # Wait for the collector thread to finish (this will be after pool join)
1068
+ self.logger.info("Waiting for plot collector thread to finish...")
1069
+ collector_thread.join()
1070
+ self.logger.info("Plot collector thread is finished.")
1071
+
1072
+ # Run the termination and cleanup in a daemon thread to not block the code if
1073
+ # a worker process refuses to terminate:
1074
+ daemon_thread = threading.Thread(
1075
+ name="NHC Termination & Cleanup",
1076
+ target=self.terminate_and_cleanup,
1077
+ args=(pool,),
1078
+ )
1079
+ daemon_thread.daemon = True # Set as a daemon thread, so it won't block program exit
1080
+ daemon_thread.start()
1081
+
1082
+ return True
1083
+
1084
+ # end method definition
1085
+
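For orientation, a hedged end-to-end sketch of this bulk loader (the year range and file types are illustrative):

    success = nhc.load_storms(
        year_start=2018,
        year_end=2020,
        save_track_images=["png", "svg"],
        save_track_data=["json"],
        save_rain_images=["png"],
        save_rain_data=["json"],
        skip_existing_files=True,
        load_async=True,
        async_processes=8,
    )
    if success:
        data = nhc.get_data()  # Data object with one row per storm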
1086
+ def result_collector(self, results: list, done_event: threading.Event) -> None:
1087
+ """Collect results from async processes and logs them.
1088
+
1089
+ Args:
1090
+ results (list):
1091
+ A list of tuples containing task metadata, async results, and pre-baked log messages.
1092
+ done_event (threading.Event):
1093
+ Event signaling that no more tasks will be submitted.
1094
+
1095
+ """
1096
+
1097
+ self.logger.info("Collector thread for plot results started...")
1098
+
1099
+ # Keep running while results remain or not signalled 'done':
1100
+ while not done_event.is_set() or results:
1101
+ # Iterate over a copy of the list of tuples:
1102
+ for result in results[:]:
1103
+ # Unpack the result tuple:
1104
+ async_result, success_message, failure_message, column_name, storm_id, image_file = result
1105
+ # Check if the process has finished:
1106
+ if async_result.ready():
1107
+ try:
1108
+ # Retrieve result (ensuring no exception):
1109
+ success, message = async_result.get()
1110
+ if success:
1111
+ self.logger.info(
1112
+ "%s. %s",
1113
+ success_message,
1114
+ "Plot result: " + message if message else "",
1115
+ )
1116
+ elif os.path.exists(image_file):
1117
+ self.logger.warning(
1118
+ "%s. %s",
1119
+ failure_message,
1120
+ "Plot result: " + message if message else "",
1121
+ )
1122
+ else:
1123
+ self.logger.warning("%s. %s", failure_message, "Plot result: " + message if message else "")
1124
+ self.get_data().set_value(
1125
+ column=column_name,
1126
+ value="", # set to empty to make sure the bulk loader are not trying to find the non-existing file
1127
+ condition=(self.get_data()["id"] == storm_id), # boolean pd.Series
1128
+ )
1129
+ except Exception:
1130
+ self.logger.warning(failure_message)
1131
+ # Remove logged result:
1132
+ results.remove(result)
1133
+ # Prevent excessive CPU usage:
1134
+ time.sleep(0.5)
1135
+
1136
+ self.logger.info("Collector thread for plot results got 'done' event and no further results to process.")
1137
+
1138
+ # end method definition
1139
+
1140
+ def terminate_and_cleanup(self, pool: multiprocessing.pool.Pool) -> None:
1141
+ """Terminate and clean up the worker pool in a daemon thread.
1142
+
1143
+ Args:
1144
+ pool (multiprocessing.pool.Pool):
1145
+ The pool of worker processes to terminate.
1146
+
1147
+ """
1148
+
1149
+ for worker in pool._pool or []: # noqa: SLF001
1150
+ if not worker.is_alive() and worker.exitcode is None:
1151
+ self.logger.warning("Worker with PID -> %s is defunct (zombie state).", worker.pid)
1152
+ elif worker.is_alive():
1153
+ self.logger.warning("Worker with PID -> %s is still alive.", worker.pid)
1154
+ else:
1155
+ self.logger.info(
1156
+ "Worker with PID -> %s finished with exit code -> %s.",
1157
+ worker.pid,
1158
+ str(worker.exitcode),
1159
+ )
1160
+
1161
+ self.logger.info("Terminating the worker pool due to potentially hanging tasks...")
1162
+ pool.terminate() # Terminate the pool to stop all workers
1163
+ self.logger.info("Plot storm worker pool terminated.")
1164
+
1165
+ self.logger.info("Joining plot storm worker pool...")
1166
+ pool.join() # Wait for the worker processes to exit
1167
+ self.logger.info("Plot storm worker pool is finished (joined).")
1168
+
1169
+ # end method definition