water-column-sonar-processing 0.0.1__py3-none-any.whl → 25.11.1__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release.


This version of water-column-sonar-processing might be problematic.

Files changed (60)
  1. water_column_sonar_processing/__init__.py +13 -0
  2. water_column_sonar_processing/aws/__init__.py +7 -0
  3. water_column_sonar_processing/aws/dynamodb_manager.py +355 -0
  4. water_column_sonar_processing/aws/s3_manager.py +420 -0
  5. water_column_sonar_processing/aws/s3fs_manager.py +72 -0
  6. {model → water_column_sonar_processing}/aws/sns_manager.py +10 -21
  7. {model → water_column_sonar_processing}/aws/sqs_manager.py +11 -19
  8. water_column_sonar_processing/cruise/__init__.py +4 -0
  9. water_column_sonar_processing/cruise/create_empty_zarr_store.py +191 -0
  10. water_column_sonar_processing/cruise/datatree_manager.py +21 -0
  11. water_column_sonar_processing/cruise/resample_regrid.py +339 -0
  12. water_column_sonar_processing/geometry/__init__.py +11 -0
  13. water_column_sonar_processing/geometry/elevation_manager.py +111 -0
  14. water_column_sonar_processing/geometry/geometry_manager.py +243 -0
  15. water_column_sonar_processing/geometry/line_simplification.py +176 -0
  16. water_column_sonar_processing/geometry/pmtile_generation.py +261 -0
  17. water_column_sonar_processing/index/__init__.py +3 -0
  18. water_column_sonar_processing/index/index_manager.py +384 -0
  19. water_column_sonar_processing/model/__init__.py +3 -0
  20. water_column_sonar_processing/model/zarr_manager.py +722 -0
  21. water_column_sonar_processing/process.py +149 -0
  22. water_column_sonar_processing/processing/__init__.py +4 -0
  23. water_column_sonar_processing/processing/raw_to_netcdf.py +320 -0
  24. water_column_sonar_processing/processing/raw_to_zarr.py +425 -0
  25. water_column_sonar_processing/utility/__init__.py +13 -0
  26. {model → water_column_sonar_processing}/utility/cleaner.py +7 -8
  27. water_column_sonar_processing/utility/constants.py +118 -0
  28. {model → water_column_sonar_processing}/utility/pipeline_status.py +47 -24
  29. water_column_sonar_processing/utility/timestamp.py +12 -0
  30. water_column_sonar_processing-25.11.1.dist-info/METADATA +182 -0
  31. water_column_sonar_processing-25.11.1.dist-info/RECORD +34 -0
  32. {water_column_sonar_processing-0.0.1.dist-info → water_column_sonar_processing-25.11.1.dist-info}/WHEEL +1 -1
  33. {water_column_sonar_processing-0.0.1.dist-info → water_column_sonar_processing-25.11.1.dist-info/licenses}/LICENSE +1 -1
  34. water_column_sonar_processing-25.11.1.dist-info/top_level.txt +1 -0
  35. __init__.py +0 -0
  36. model/__init__.py +0 -0
  37. model/aws/__init__.py +0 -0
  38. model/aws/dynamodb_manager.py +0 -149
  39. model/aws/s3_manager.py +0 -356
  40. model/aws/s3fs_manager.py +0 -74
  41. model/cruise/__init__.py +0 -0
  42. model/cruise/create_empty_zarr_store.py +0 -166
  43. model/cruise/resample_regrid.py +0 -248
  44. model/geospatial/__init__.py +0 -0
  45. model/geospatial/geometry_manager.py +0 -194
  46. model/geospatial/geometry_simplification.py +0 -81
  47. model/geospatial/pmtile_generation.py +0 -74
  48. model/index/__init__.py +0 -0
  49. model/index/index.py +0 -228
  50. model/model.py +0 -138
  51. model/utility/__init__.py +0 -0
  52. model/utility/constants.py +0 -56
  53. model/utility/timestamp.py +0 -12
  54. model/zarr/__init__.py +0 -0
  55. model/zarr/bar.py +0 -28
  56. model/zarr/foo.py +0 -11
  57. model/zarr/zarr_manager.py +0 -298
  58. water_column_sonar_processing-0.0.1.dist-info/METADATA +0 -89
  59. water_column_sonar_processing-0.0.1.dist-info/RECORD +0 -32
  60. water_column_sonar_processing-0.0.1.dist-info/top_level.txt +0 -2
@@ -0,0 +1,420 @@
+ import json
+ import os
+ from collections.abc import Generator
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from time import sleep
+ from typing import Optional
+
+ import boto3
+ import botocore
+ from boto3.s3.transfer import TransferConfig
+ from botocore.config import Config
+ from botocore.exceptions import ClientError
+
+ MAX_POOL_CONNECTIONS = 64
+ MAX_CONCURRENCY = 64
+ MAX_WORKERS = 64
+ GB = 1024**3
+
+
+ #########################################################################
+ def chunked(ll: list, n: int) -> Generator:
+     # Yields successively n-sized chunks from ll.
+     for i in range(0, len(ll), n):
+         yield ll[i : i + n]
+
+
+ class S3Manager:
+     #####################################################################
+     def __init__(
+         self,
+         endpoint_url: Optional[str] = None,
+     ):
+         self.endpoint_url = endpoint_url
+         # self.input_bucket_name = os.environ.get("INPUT_BUCKET_NAME")
+         # self.output_bucket_name = os.environ.get("OUTPUT_BUCKET_NAME")
+         self.s3_region = os.environ.get("AWS_REGION", default="us-east-1")
+         self.s3_client_config = Config(max_pool_connections=MAX_POOL_CONNECTIONS)
+         self.s3_transfer_config = TransferConfig(
+             max_concurrency=MAX_CONCURRENCY,
+             use_threads=True,
+             max_bandwidth=None,
+             multipart_threshold=10 * GB,
+         )
+         self.s3_session = boto3.Session(
+             aws_access_key_id=os.environ.get("ACCESS_KEY_ID"),
+             aws_secret_access_key=os.environ.get("SECRET_ACCESS_KEY"),
+             region_name=self.s3_region,
+         )
+         self.s3_client = self.s3_session.client(
+             service_name="s3",
+             config=self.s3_client_config,
+             region_name=self.s3_region,
+             endpoint_url=self.endpoint_url,
+         )
+         self.s3_resource = boto3.resource(
+             service_name="s3",
+             config=self.s3_client_config,
+             region_name=self.s3_region,
+         )
+         self.s3_session_noaa_wcsd_zarr_pds = boto3.Session(
+             aws_access_key_id=os.environ.get("OUTPUT_BUCKET_ACCESS_KEY"),
+             aws_secret_access_key=os.environ.get("OUTPUT_BUCKET_SECRET_ACCESS_KEY"),
+             region_name=self.s3_region,
+         )
+         self.s3_client_noaa_wcsd_zarr_pds = self.s3_session_noaa_wcsd_zarr_pds.client(
+             service_name="s3",
+             config=self.s3_client_config,
+             region_name=self.s3_region,
+             endpoint_url=self.endpoint_url,
+         )
+         self.s3_resource_noaa_wcsd_zarr_pds = (
+             self.s3_session_noaa_wcsd_zarr_pds.resource(
+                 service_name="s3",
+                 config=self.s3_client_config,
+                 region_name=self.s3_region,
+                 endpoint_url=self.endpoint_url,
+             )
+         )
+         self.paginator = self.s3_client.get_paginator("list_objects_v2")
+         self.paginator_noaa_wcsd_zarr_pds = (
+             self.s3_client_noaa_wcsd_zarr_pds.get_paginator("list_objects_v2")
+         )
+
+     #####################################################################
+     # tested
+     def create_bucket(
+         self,
+         bucket_name: str,
+     ):
+         """
+         Note: this function is only really meant to be used for creating test
+         buckets. It allows public read of all objects.
+         """
+         # https://github.com/aodn/aodn_cloud_optimised/blob/e5035495e782783cc8b9e58711d63ed466420350/test_aodn_cloud_optimised/test_schema.py#L7
+         # public_policy = {
+         #     "Version": "2012-10-17",
+         #     "Statement": [
+         #         {
+         #             "Effect": "Allow",
+         #             "Principal": "*",
+         #             "Action": "s3:GetObject",
+         #             "Resource": f"arn:aws:s3:::{bucket_name}/*",
+         #         }
+         #     ],
+         # }
+         response1 = self.s3_client.create_bucket(Bucket=bucket_name, ACL="public-read")
+         print(response1)
+         # response = self.s3_client.put_bucket_policy(
+         #     Bucket=bucket_name, Policy=json.dumps(public_policy)
+         # )
+         # print(response)
+
+     #####################################################################
+     # tested
+     def list_buckets(self):
+         client = self.s3_client
+         return client.list_buckets()
+
+     #####################################################################
+     # tested
+     def upload_nodd_file(
+         self,
+         file_name: str,
+         key: str,
+         output_bucket_name: str,
+     ):
+         """
+         Used to upload a single file, e.g. the GeoJSON file to the NODD bucket
+         """
+         self.s3_resource_noaa_wcsd_zarr_pds.Bucket(output_bucket_name).upload_file(
+             Filename=file_name, Key=key
+         )
+         return key
+
+     #####################################################################
+     # tested
+     def upload_files_with_thread_pool_executor(
+         self,
+         output_bucket_name: str,
+         all_files: list,
+     ):
+         # 'all_files' is passed a list of lists: [[local_path, s3_key], [...], ...]
+         all_uploads = []
+         try:  # TODO: problem with threadpool here, missing child files
+             with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
+                 futures = [
+                     executor.submit(
+                         self.upload_nodd_file,  # TODO: verify which one is using this
+                         all_file[0],  # file_name
+                         all_file[1],  # key
+                         output_bucket_name,  # output_bucket_name
+                     )
+                     for all_file in all_files
+                 ]
+                 for future in as_completed(futures):
+                     result = future.result()
+                     if result:
+                         all_uploads.extend([result])
+         except Exception as err:
+             raise RuntimeError(f"Problem, {err}")
+
+         print("Done uploading files using threading pool.")
+         return all_uploads
+
+     #####################################################################
+     # tested
+     def upload_zarr_store_to_s3(
+         self,
+         output_bucket_name: str,
+         local_directory: str,
+         object_prefix: str,
+         cruise_name: str,
+     ) -> None:
+         print("uploading model store to s3")
+         try:
+             #
+             print("Starting upload with thread pool executor.")
+             # # 'all_files' is passed a list of lists: [[local_path, s3_key], [...], ...]
+             all_files = []
+             for subdir, dirs, files in os.walk(f"{local_directory}/{cruise_name}.zarr"):
+                 for file in files:
+                     local_path = os.path.join(subdir, file)
+                     # TODO: find a better method for splitting strings here:
+                     # 'level_2/Henry_B._Bigelow/HB0806/EK60/HB0806.zarr/.zattrs'
+                     # s3_key = f"{object_prefix}/{cruise_name}.zarr{local_path.split(f'{cruise_name}.zarr')[-1]}"
+                     s3_key = os.path.join(
+                         object_prefix,
+                         os.path.join(
+                             subdir[subdir.find(f"{cruise_name}.zarr") :], file
+                         ),
+                     )
+                     all_files.append([local_path, s3_key])
+             self.upload_files_with_thread_pool_executor(
+                 output_bucket_name=output_bucket_name,
+                 all_files=all_files,
+             )
+             print("Done uploading with thread pool executor.")
+         except Exception as err:
+             raise RuntimeError(f"Problem uploading zarr store to s3, {err}")
+
+     #####################################################################
+     # tested
+     def upload_file(
+         self,
+         filename: str,
+         bucket_name: str,
+         key: str,
+     ):
+         self.s3_resource.Bucket(bucket_name).upload_file(Filename=filename, Key=key)
+
+     #####################################################################
+     # tested
+     def check_if_object_exists(self, bucket_name, key_name) -> bool:
+         s3_manager2 = S3Manager()
+         s3_manager2.list_objects(bucket_name=bucket_name, prefix=key_name)
+         s3_client_noaa_wcsd_zarr_pds = self.s3_client_noaa_wcsd_zarr_pds
+         try:
+             s3_client_noaa_wcsd_zarr_pds.head_object(Bucket=bucket_name, Key=key_name)
+             return True
+         except botocore.exceptions.ClientError as e:
+             if e.response["Error"]["Code"] == "404":
+                 # The object does not exist.
+                 return False
+             elif e.response["Error"]["Code"] == 403:
+                 # Unauthorized, including invalid bucket
+                 return False
+             else:
+                 # Something else has gone wrong.
+                 raise
+
+     #####################################################################
+     # tested
+     def list_objects(self, bucket_name, prefix):  # noaa-wcsd-pds and noaa-wcsd-zarr-pds
+         # TODO: this isn't working for geojson detecting objects!!!!!!!
+         # analog to "find_children_objects"
+         # Returns a list of key strings for each object in bucket defined by prefix
+         # s3_client = self.s3_client
+         keys = []
+         # paginator = s3_client.get_paginator("list_objects_v2")
+         page_iterator = self.paginator.paginate(Bucket=bucket_name, Prefix=prefix)
+         for page in page_iterator:
+             if "Contents" in page.keys():
+                 keys.extend([k["Key"] for k in page["Contents"]])
+         return keys
+
+     #####################################################################
+     # TODO: change name to "directory"
+     # def folder_exists_and_not_empty(self, bucket_name: str, path: str) -> bool:
+     #     if not path.endswith("/"):
+     #         path = path + "/"
+     #     # s3_client = self.s3_client
+     #     resp = self.list_objects(
+     #         bucket_name=bucket_name, prefix=path
+     #     )  # TODO: this is returning root folder and doesn't include children or hidden folders
+     #     # resp = s3_client.list_objects(Bucket=bucket, Prefix=path, Delimiter='/', MaxKeys=1)
+     #     return "Contents" in resp
+
+     #####################################################################
+     # private
+     def __paginate_child_objects(
+         self,
+         bucket_name: str,
+         sub_prefix: str = None,
+     ) -> list:
+         page_iterator = self.s3_client.get_paginator("list_objects_v2").paginate(
+             Bucket=bucket_name, Prefix=sub_prefix
+         )
+         objects = []
+         for page in page_iterator:
+             if "Contents" in page.keys():
+                 objects.extend(page["Contents"])
+         return objects
+
+     #####################################################################
+     # tested
+     def get_child_objects(
+         self,
+         bucket_name: str,
+         sub_prefix: str,
+         file_suffix: str = None,
+     ) -> list:
+         print("Getting child objects")
+         raw_files = []
+         try:
+             children = self.__paginate_child_objects(
+                 bucket_name=bucket_name,
+                 sub_prefix=sub_prefix,
+             )
+             if file_suffix is None:
+                 raw_files = children
+             else:
+                 for child in children:
+                     # Note: Any files with predicate 'NOISE' are to be ignored
+                     # see: "Bell_M._Shimada/SH1507" cruise for more details.
+                     if child["Key"].endswith(file_suffix) and not os.path.basename(
+                         child["Key"]
+                     ).startswith("NOISE"):
+                         raw_files.append(child["Key"])
+                 return raw_files
+         except ClientError as err:
+             print(f"Problem was encountered while getting s3 files: {err}")
+             raise
+         print(f"Found {len(raw_files)} files.")
+         return raw_files
+
+     #####################################################################
+     # tested
+     def get_object(  # noaa-wcsd-pds or noaa-wcsd-zarr-pds
+         self,
+         bucket_name,
+         key_name,
+     ):
+         # Meant for getting singular objects from a bucket, used by indexing lambda
+         # can also return byte range potentially.
+         print(f"Getting object {key_name} from {bucket_name}")
+         try:
+             response = self.s3_client.get_object(
+                 Bucket=bucket_name,
+                 Key=key_name,
+             )
+             # status = response.get("ResponseMetadata", {}).get("HTTPStatusCode")
+             # if status == 200:
+             print(f"Done getting object {key_name} from {bucket_name}")
+             return response
+         except ClientError as err:
+             print(f"Problem was encountered while getting s3 file: {err}")
+             raise
+
+     #####################################################################
+     # tested
+     def download_file(
+         self,
+         bucket_name,
+         key,
+         file_name,  # path to where the file will be saved
+     ):
+         try:
+             self.s3_client.download_file(
+                 Bucket=bucket_name, Key=key, Filename=file_name
+             )
+             # TODO: if bottom file doesn't exist, don't fail downloader
+             print("downloaded file")
+         except Exception as err:
+             raise RuntimeError(f"Problem was encountered while downloading_file, {err}")
+
+     #####################################################################
+     # tested
+     def delete_nodd_objects(  # nodd-bucket
+         self,
+         bucket_name,
+         objects: list,
+     ):
+         try:
+             print(f"Deleting {len(objects)} objects in {bucket_name} in batches.")
+             objects_to_delete = []
+             for obj in objects:
+                 objects_to_delete.append({"Key": obj["Key"]})
+             # Note: request can contain a list of up to 1000 keys
+             for batch in chunked(ll=objects_to_delete, n=1000):
+                 # An error occurred (SlowDown) when calling the DeleteObjects operation (reached max retries: 4):
+                 # Please reduce your request rate.
+                 sleep(0.5)
+                 #
+                 self.s3_client_noaa_wcsd_zarr_pds.delete_objects(
+                     Bucket=bucket_name, Delete={"Objects": batch}
+                 )
+             print("Deleted files.")
+         except Exception as err:
+             raise RuntimeError(f"Problem was encountered while deleting objects, {err}")
+
+     #####################################################################
+     # tested
+     def delete_nodd_object(  # only used to delete geojson it looks like?! Remove.
+         self,
+         bucket_name,
+         key_name,
+     ):
+         try:
+             print(f"Deleting {key_name} objects in {bucket_name}.")
+             self.s3_client_noaa_wcsd_zarr_pds.delete_object(
+                 Bucket=bucket_name, Key=key_name
+             )
+             print("Deleted file.")
+         except Exception as err:
+             raise RuntimeError(f"Problem was encountered while deleting objects, {err}")
+
+     #####################################################################
+     # tested
+     def put(self, bucket_name, key, body):  # noaa-wcsd-model-pds
+         try:
+             self.s3_client.put_object(
+                 Bucket=bucket_name, Key=key, Body=body
+             )  # "Body" can be a file
+         except Exception as err:
+             raise RuntimeError(f"Problem was encountered putting object, {err}")
+
+     #####################################################################
+     # tested
+     def read_s3_json(
+         self,
+         ship_name,
+         cruise_name,
+         sensor_name,
+         file_name_stem,
+         output_bucket_name,  # TODO: change to just bucket_name
+     ) -> str:
+         try:
+             resource = self.s3_resource_noaa_wcsd_zarr_pds
+             content_object = resource.Object(
+                 bucket_name=output_bucket_name,
+                 key=f"spatial/geojson/{ship_name}/{cruise_name}/{sensor_name}/{file_name_stem}.json",
+             ).get()
+             file_content = content_object["Body"].read().decode("utf-8")
+             json_content = json.loads(file_content)
+             return json_content
+         except Exception as err:
+             raise RuntimeError(f"Exception encountered reading s3 GeoJSON, {err}")
+
+
+ #########################################################################
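A rough usage sketch for the S3Manager added in the s3_manager.py hunk above (illustrative only, not part of the package diff). The bucket name and cruise prefix are example values taken from comments in the module, and the ACCESS_KEY_ID / SECRET_ACCESS_KEY environment variables read by the constructor are assumed to be set already:

import os

from water_column_sonar_processing.aws.s3_manager import S3Manager

s3_manager = S3Manager()  # endpoint_url can point at a moto/localstack endpoint in tests
raw_files = s3_manager.get_child_objects(
    bucket_name="noaa-wcsd-pds",  # illustrative input bucket named in the module comments
    sub_prefix="data/raw/Bell_M._Shimada/SH1507/EK60/",  # hypothetical cruise prefix
    file_suffix=".raw",
)
for key in raw_files:
    # download each raw file into the working directory, keeping only the basename
    s3_manager.download_file(
        bucket_name="noaa-wcsd-pds", key=key, file_name=os.path.basename(key)
    )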
@@ -0,0 +1,72 @@
+ import os
+ from typing import Optional
+
+ import s3fs
+
+ # TODO: S3FS_LOGGING_LEVEL=DEBUG
+ # S3FS_LOGGING_LEVEL=DEBUG
+
+
+ class S3FSManager:
+     #####################################################################
+     def __init__(
+         self,
+         endpoint_url: Optional[str] = None,
+     ):
+         self.endpoint_url = endpoint_url
+         self.input_bucket_name = os.environ.get("INPUT_BUCKET_NAME")
+         self.output_bucket_name = os.environ.get("OUTPUT_BUCKET_NAME")
+         self.s3_region = os.environ.get("AWS_REGION", default="us-east-1")
+         self.s3fs = s3fs.S3FileSystem(
+             endpoint_url=endpoint_url,
+             key=os.environ.get("OUTPUT_BUCKET_ACCESS_KEY"),
+             secret=os.environ.get("OUTPUT_BUCKET_SECRET_ACCESS_KEY"),
+             # asynchronous=True,
+         )
+         # self.s3fs.ls("")
+
+     # s3_fs = s3fs.S3FileSystem(  # TODO: use s3fs_manager?
+     #     anon=True,
+     #     client_kwargs={
+     #         "endpoint_url": moto_server,
+     #         "region_name": "us-east-1",
+     #     },
+     # )
+     #####################################################################
+     def s3_map(
+         self,
+         s3_zarr_store_path,  # f's3://{bucket}/{input_zarr_path}'
+     ):
+         # The "s3_zarr_store_path" is defined as f's3://{bucket}/{input_zarr_path}'
+         # create=False, not false because will be writing
+         # return s3fs.S3Map(root=s3_zarr_store_path, s3=self.s3fs, check=True)
+         return s3fs.S3Map(
+             root=s3_zarr_store_path, s3=self.s3fs
+         )  # create=False, not false because will be writing
+
+     #####################################################################
+     # def add_file(self, filename):
+     #     full_path = f"{os.getenv('OUTPUT_BUCKET_NAME')}/testing/{filename}"
+     #     print(full_path)
+     #
+     #     self.s3fs.touch(full_path)
+     #     ff = self.s3fs.ls(f"{os.getenv('OUTPUT_BUCKET_NAME')}/")
+     #
+     #     print(ff)
+
+     #####################################################################
+     def upload_data(self, bucket_name, file_path, prefix):
+         # TODO: this works in theory but use boto3 to upload files
+         s3_path = f"s3://{bucket_name}/{prefix}/"
+         s3_file_system = self.s3fs
+         s3_file_system.put(file_path, s3_path, recursive=True)
+
+     #####################################################################
+     def exists(
+         self,
+         s3_path,
+     ):
+         # s3_file_system =
+         return self.s3fs.exists(s3_path)
+
+     #####################################################################
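S3FSManager.s3_map returns an s3fs.S3Map, which is the mapping that Zarr-aware readers consume. A minimal sketch (not part of the diff) of opening a cruise-level store through it; xarray is assumed to be installed, and the store path below reuses the example cited in the s3_manager.py comments rather than a verified object:

import xarray as xr

from water_column_sonar_processing.aws.s3fs_manager import S3FSManager

s3fs_manager = S3FSManager()  # credentials come from the OUTPUT_BUCKET_* environment variables
store = s3fs_manager.s3_map(
    "s3://noaa-wcsd-zarr-pds/level_2/Henry_B._Bigelow/HB0806/EK60/HB0806.zarr"  # illustrative store path
)
ds = xr.open_zarr(store)  # lazily opens the Zarr store backed by S3
print(ds)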
@@ -1,5 +1,5 @@
  import os
- import json
+
  import boto3


@@ -7,32 +7,22 @@ import boto3
  class SNSManager:
      #######################################################
      def __init__(
-             self,
+         self,
      ):
          self.__sns_region = os.environ.get("AWS_REGION", default="us-east-1")
          self.__sns_session = boto3.Session(
-             aws_access_key_id=os.environ.get('ACCESS_KEY_ID'),
-             aws_secret_access_key=os.environ.get('SECRET_ACCESS_KEY'),
-             region_name=self.__sns_region
+             aws_access_key_id=os.environ.get("ACCESS_KEY_ID"),
+             aws_secret_access_key=os.environ.get("SECRET_ACCESS_KEY"),
+             region_name=self.__sns_region,
          )
          self.__sns_resource = self.__sns_session.resource(
-             service_name="sns",
-             region_name=self.__sns_region
+             service_name="sns", region_name=self.__sns_region
          )
          self.__sns_client = self.__sns_session.client(
-             service_name="sns",
-             region_name=self.__sns_region
+             service_name="sns", region_name=self.__sns_region
          )

      #######################################################
-     # TODO: pick one
-     # def publish_message(self, topic_arn, message):
-     #     response = self.__sns_client.publish(
-     #         TopicArn=topic_arn,
-     #         Message=message
-     #     )
-     #     print(f"Topic Response: {topic_arn} : '{message}' => {response}")
-
      # TODO: pick one
      def publish(self, topic_arn, message):
          response = self.__sns_client.publish(
@@ -55,13 +45,12 @@ class SNSManager:
      #######################################################
      def subscribe(self, topic_arn, endpoint):
          self.__sns_client.subscribe(
-             TopicArn=topic_arn,
-             Protocol='sqs',
-             Endpoint=endpoint
+             TopicArn=topic_arn, Protocol="sqs", Endpoint=endpoint
          )

      #######################################################
      def list_topics(self):
          print(self.__sns_client.list_topics())

-     ###########################################################
+
+     ###########################################################
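The sns_manager.py changes above are formatting-only (double quotes, trailing commas, collapsed argument lists); the publish surface is unchanged. A hedged caller sketch (not part of the diff; the topic ARN and message text are made up):

from water_column_sonar_processing.aws.sns_manager import SNSManager

sns_manager = SNSManager()  # reads ACCESS_KEY_ID, SECRET_ACCESS_KEY, and AWS_REGION from the environment
sns_manager.publish(
    topic_arn="arn:aws:sns:us-east-1:123456789012:example-topic",  # hypothetical topic ARN
    message="HB0806 processing complete",  # hypothetical message body
)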
@@ -1,34 +1,29 @@
  import os
+
  import boto3
- import time


  ###########################################################
  class SQSManager:
      #######################################################
      def __init__(
-             self,
+         self,
      ):
          self.__sqs_region = os.environ.get("AWS_REGION", default="us-east-1")
          self.__sqs_session = boto3.Session(
-             aws_access_key_id=os.environ.get('ACCESS_KEY_ID'),
-             aws_secret_access_key=os.environ.get('SECRET_ACCESS_KEY'),
-             region_name=self.__sqs_region
+             aws_access_key_id=os.environ.get("ACCESS_KEY_ID"),
+             aws_secret_access_key=os.environ.get("SECRET_ACCESS_KEY"),
+             region_name=self.__sqs_region,
          )
          self.__sqs_resource = self.__sqs_session.resource(
-             service_name="sqs",
-             region_name=self.__sqs_region
+             service_name="sqs", region_name=self.__sqs_region
          )
          self.__sqs_client = self.__sqs_session.client(
-             service_name="sqs",
-             region_name=self.__sqs_region
+             service_name="sqs", region_name=self.__sqs_region
          )

      #######################################################
-     def create_queue(
-             self,
-             queue_name
-     ):
+     def create_queue(self, queue_name):
          response = self.__sqs_client.create_queue(QueueName=queue_name)
          return response

@@ -38,13 +33,10 @@ class SQSManager:
          return sqs_queue

      #######################################################
-     def list_queues(
-             self,
-             queue_name_prefix
-     ):
+     def list_queues(self, queue_name_prefix):
          # Note: SQS control plane is eventually consistent, meaning that it
-         # takes a while to propagate the data accross the systems.
+         # takes a while to propagate the dataset accross the systems.
          response = self.__sqs_client.list_queues(QueueNamePrefix=queue_name_prefix)
          print(response)

-     #######################################################
+     #######################################################
@@ -0,0 +1,4 @@
+ from .create_empty_zarr_store import CreateEmptyZarrStore
+ from .resample_regrid import ResampleRegrid
+
+ __all__ = ["CreateEmptyZarrStore", "ResampleRegrid"]
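The corresponding modules previously lived under the removed top-level model package (model/cruise/…); with this __init__.py the classes are re-exported from the new namespace, so callers import them as:

from water_column_sonar_processing.cruise import CreateEmptyZarrStore, ResampleRegrid
# equivalent imports going through the modules directly:
# from water_column_sonar_processing.cruise.create_empty_zarr_store import CreateEmptyZarrStore
# from water_column_sonar_processing.cruise.resample_regrid import ResampleRegrid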