water-column-sonar-processing 25.1.2__py3-none-any.whl → 25.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of water-column-sonar-processing might be problematic; see the registry's advisory page for more details.

@@ -140,15 +140,7 @@ class DynamoDBManager:
140
140
  To be used to initialize a cruise, deletes all entries associated with that cruise
141
141
  in the database.
142
142
  """
143
- expression_attribute_values = {
144
- ":cr": {"S": cruise_name},
145
- ":se": {"S": sensor_name},
146
- ":sh": {"S": ship_name},
147
- }
148
- filter_expression = (
149
- "CRUISE_NAME = :cr and SENSOR_NAME = :se and SHIP_NAME = :sh"
150
- )
151
- # filter_expression = "CRUISE_NAME = :cr"
143
+ filter_expression = "CRUISE_NAME = :cr"
152
144
  response = self.dynamodb_client.scan(
153
145
  TableName=table_name,
154
146
  # Limit=1000,
@@ -156,7 +148,7 @@ class DynamoDBManager:
156
148
  # ExclusiveStartKey=where to pick up
157
149
  #ReturnConsumedCapacity='INDEXES' | 'TOTAL' | 'NONE', ...not sure
158
150
  # ProjectionExpression='#SH, #CR, #FN', # what to specifically return — from expression_attribute_names
159
- FilterExpression='CRUISE_NAME = :cr',#
151
+ FilterExpression=filter_expression,
160
152
  # ExpressionAttributeNames={
161
153
  # '#SH': 'SHIP_NAME',
162
154
  # '#CR': 'CRUISE_NAME',
@@ -170,7 +162,6 @@ class DynamoDBManager:
170
162
  ConsistentRead=True
171
163
  # ExclusiveStartKey=response["LastEvaluatedKey"],
172
164
  )
173
- print(response)
174
165
  # Note: table.scan() has 1 MB limit on results so pagination is used
175
166
 
176
167
  if len(response["Items"]) == 0 and "LastEvaluatedKey" not in response:
@@ -183,7 +174,7 @@ class DynamoDBManager:
183
174
  TableName=table_name,
184
175
  ### Either 'Select' or 'ExpressionAttributeNames'/'ProjectionExpression'
185
176
  Select='ALL_ATTRIBUTES', # or 'SPECIFIC_ATTRIBUTES',
186
- FilterExpression='CRUISE_NAME = :cr', #
177
+ FilterExpression=filter_expression,
187
178
  #ProjectionExpression='#SH, #CR, #FN', # what to specifically return — from expression_attribute_names
188
179
  # ExpressionAttributeNames={ # would need to specify all cols in df
189
180
  # '#SH': 'SHIP_NAME',
@@ -205,6 +196,66 @@ class DynamoDBManager:
205
196
 
206
197
  return df.sort_values(by="START_TIME", ignore_index=True)
207
198
 
199
+ #####################################################################
200
+ # def get_cruise_list(
201
+ # self,
202
+ # table_name,
203
+ # ) -> list:
204
+ # """
205
+ # Experimental, gets all cruise names as list
206
+ # """
207
+ # filter_expression = "CRUISE_NAME = :cr"
208
+ # response = self.dynamodb_client.scan(
209
+ # TableName=table_name,
210
+ # Select='SPECIFIC_ATTRIBUTES',
211
+ # #ReturnConsumedCapacity='INDEXES' | 'TOTAL' | 'NONE', ...not sure
212
+ # # ProjectionExpression='#SH, #CR, #FN', # what to specifically return — from expression_attribute_names
213
+ # FilterExpression=filter_expression,
214
+ # # ExpressionAttributeNames={
215
+ # # '#SH': 'SHIP_NAME',
216
+ # # '#CR': 'CRUISE_NAME',
217
+ # # '#FN': 'FILE_NAME',
218
+ # # },
219
+ # # ExpressionAttributeValues={ # criteria
220
+ # # ':cr': {
221
+ # # 'S': cruise_name,
222
+ # # },
223
+ # # },
224
+ # )
225
+ # # Note: table.scan() has 1 MB limit on results so pagination is used
226
+ #
227
+ # if len(response["Items"]) == 0 and "LastEvaluatedKey" not in response:
228
+ # return pd.DataFrame() # If no results, return empty dataframe
229
+ #
230
+ # data = response["Items"]
231
+ #
232
+ # while response.get('LastEvaluatedKey'): #"LastEvaluatedKey" in response:
233
+ # response = self.dynamodb_client.scan(
234
+ # TableName=table_name,
235
+ # ### Either 'Select' or 'ExpressionAttributeNames'/'ProjectionExpression'
236
+ # Select='ALL_ATTRIBUTES', # or 'SPECIFIC_ATTRIBUTES',
237
+ # FilterExpression=filter_expression,
238
+ # #ProjectionExpression='#SH, #CR, #FN', # what to specifically return — from expression_attribute_names
239
+ # # ExpressionAttributeNames={ # would need to specify all cols in df
240
+ # # '#SH': 'SHIP_NAME',
241
+ # # '#CR': 'CRUISE_NAME',
242
+ # # '#FN': 'FILE_NAME',
243
+ # # },
244
+ # ExpressionAttributeValues={ # criteria
245
+ # ':cr': {
246
+ # 'S': cruise_name,
247
+ # },
248
+ # },
249
+ # ConsistentRead=True,
250
+ # ExclusiveStartKey=response["LastEvaluatedKey"],
251
+ # )
252
+ # data.extend(response["Items"])
253
+ #
254
+ # deserializer = self.type_deserializer
255
+ # df = pd.DataFrame([deserializer.deserialize({"M": i}) for i in data])
256
+ #
257
+ # return df.sort_values(by="START_TIME", ignore_index=True)
258
+
208
259
  #####################################################################
209
260
  # TODO: WIP
210
261
  def delete_item(
@@ -16,7 +16,7 @@ class S3FSManager:
16
16
  # self.output_bucket_name = os.environ.get("OUTPUT_BUCKET_NAME")
17
17
  self.s3_region = os.environ.get("AWS_REGION", default="us-east-1")
18
18
  self.s3fs = s3fs.S3FileSystem(
19
- asynchronous=False,
19
+ # asynchronous=False,
20
20
  endpoint_url=endpoint_url,
21
21
  key=os.environ.get("OUTPUT_BUCKET_ACCESS_KEY"),
22
22
  secret=os.environ.get("OUTPUT_BUCKET_SECRET_ACCESS_KEY"),
@@ -1,5 +1,4 @@
1
1
  import gc
2
- import os
3
2
  from pathlib import Path
4
3
 
5
4
  import numcodecs
@@ -198,9 +197,9 @@ class ResampleRegrid:
198
197
  # df[df['PIPELINE_STATUS'] < PipelineStatus.LEVEL_1_PROCESSING] = np.nan
199
198
 
200
199
  # Get index from all cruise files. Note: should be based on which are included in cruise.
201
- index = cruise_df.index[
200
+ index = int(cruise_df.index[
202
201
  cruise_df["FILE_NAME"] == f"{file_name_stem}.raw"
203
- ][0]
202
+ ][0])
204
203
 
205
204
  # get input store
206
205
  input_xr_zarr_store = zarr_manager.open_s3_zarr_store_with_xarray(
@@ -227,18 +226,20 @@ class ResampleRegrid:
227
226
  min_echo_range = np.nanmin(np.float32(cruise_df["MIN_ECHO_RANGE"]))
228
227
  max_echo_range = np.nanmax(np.float32(cruise_df["MAX_ECHO_RANGE"]))
229
228
 
230
- print(
231
- "Creating empty ndarray for Sv data."
232
- ) # Note: cruise_zarr dimensions are (depth, time, frequency)
229
+ print("Creating empty ndarray for Sv data.") # Note: cruise dims (depth, time, frequency)
230
+ output_zarr_store_shape = output_zarr_store.Sv.shape
231
+ end_ping_time_index - start_ping_time_index
232
+ output_zarr_store_height = output_zarr_store_shape[0]
233
+ output_zarr_store_width = end_ping_time_index - start_ping_time_index
234
+ output_zarr_store_depth = output_zarr_store_shape[2]
233
235
  cruise_sv_subset = np.empty(
234
- shape=output_zarr_store.Sv[
235
- :, start_ping_time_index:end_ping_time_index, :
236
- ].shape
236
+ shape=(output_zarr_store_height, output_zarr_store_width, output_zarr_store_depth)
237
237
  )
238
238
  cruise_sv_subset[:, :, :] = np.nan
239
239
 
240
240
  all_cruise_depth_values = zarr_manager.get_depth_values(
241
- min_echo_range=min_echo_range, max_echo_range=max_echo_range
241
+ min_echo_range=min_echo_range,
242
+ max_echo_range=max_echo_range
242
243
  ) # (5262,) and
243
244
 
244
245
  print(" ".join(list(input_xr_zarr_store.Sv.dims)))
@@ -282,16 +283,6 @@ class ResampleRegrid:
282
283
  #########################################################################
283
284
  # write Sv values to cruise-level-model-store
284
285
  output_zarr_store.Sv[:, start_ping_time_index:end_ping_time_index, :] = regrid_resample.values
285
-
286
- #########################################################################
287
- # [5] write subset of latitude/longitude
288
- output_zarr_store.latitude[
289
- start_ping_time_index:end_ping_time_index
290
- ] = geospatial.dropna()["latitude"].values # TODO: get from ds_sv directly, dont need geojson anymore
291
- output_zarr_store.longitude[
292
- start_ping_time_index:end_ping_time_index
293
- ] = geospatial.dropna()["longitude"].values
294
-
295
286
  #########################################################################
296
287
  # TODO: add the "detected_seafloor_depth/" to the
297
288
  # L2 cruise dataarrays
@@ -311,6 +302,15 @@ class ResampleRegrid:
311
302
  output_zarr_store.bottom[
312
303
  start_ping_time_index:end_ping_time_index
313
304
  ] = detected_seafloor_depths
305
+ #
306
+ #########################################################################
307
+ # [5] write subset of latitude/longitude
308
+ output_zarr_store.latitude[
309
+ start_ping_time_index:end_ping_time_index
310
+ ] = geospatial.dropna()["latitude"].values # TODO: get from ds_sv directly, dont need geojson anymore
311
+ output_zarr_store.longitude[
312
+ start_ping_time_index:end_ping_time_index
313
+ ] = geospatial.dropna()["longitude"].values
314
314
  #########################################################################
315
315
  #########################################################################
316
316
  except Exception as err:
@@ -2,6 +2,7 @@ import numcodecs
2
2
  import numpy as np
3
3
  import xarray as xr
4
4
  import zarr
5
+ import importlib.metadata
5
6
  from numcodecs import Blosc
6
7
 
7
8
  from water_column_sonar_processing.aws import S3FSManager
@@ -249,9 +250,9 @@ class ZarrManager:
249
250
  root.attrs["sensor_name"] = sensor_name
250
251
  #
251
252
  root.attrs["processing_software_name"] = Coordinates.PROJECT_NAME.value
252
- root.attrs["processing_software_version"] = (
253
- "25.1.2" # TODO: get programmatically, echopype>utils>prov.py
254
- )
253
+
254
+ current_project_version = importlib.metadata.version('water_column_sonar_processing')
255
+ root.attrs["processing_software_version"] = current_project_version
255
256
  root.attrs["processing_software_time"] = Timestamp.get_timestamp()
256
257
  #
257
258
  root.attrs["calibration_status"] = calibration_status
@@ -290,7 +291,7 @@ class ZarrManager:
290
291
  # zarr_synchronizer: Union[str, None] = None, # TODO:
291
292
  output_bucket_name: str,
292
293
  endpoint_url=None,
293
- ):
294
+ ) -> zarr.hierarchy.Group:
294
295
  # Mounts a Zarr store using pythons Zarr implementation. The mounted store
295
296
  # will have read/write privileges so that store can be updated.
296
297
  print("Opening L2 Zarr store with Zarr for writing.")
@@ -316,18 +317,21 @@ class ZarrManager:
316
317
  input_bucket_name: str,
317
318
  endpoint_url=None,
318
319
  ) -> xr.Dataset:
319
- print("Opening L1 Zarr store in S3 with Xarray.")
320
+ print("Opening L1 Zarr store in S3 with Xarray.") # TODO: Is this only used for reading from?
320
321
  try:
321
322
  zarr_path = f"s3://{input_bucket_name}/level_1/{ship_name}/{cruise_name}/{sensor_name}/{file_name_stem}.zarr"
322
323
  s3fs_manager = S3FSManager(endpoint_url=endpoint_url)
323
324
  store_s3_map = s3fs_manager.s3_map(s3_zarr_store_path=zarr_path)
324
- ds = xr.open_zarr(
325
- store=store_s3_map, consolidated=None
326
- ) # synchronizer=SYNCHRONIZER
325
+ ds = xr.open_dataset(
326
+ filename_or_obj=store_s3_map,
327
+ engine="zarr",
328
+ chunks={}
329
+ )
327
330
  except Exception as err:
328
331
  print("Problem opening Zarr store in S3 as Xarray.")
329
332
  raise err
330
- print("Done opening Zarr store in S3 as Xarray.")
333
+ finally:
334
+ print("Exiting opening Zarr store in S3 as Xarray.")
331
335
  return ds
332
336
 
333
337
  def open_l2_zarr_store_with_xarray(
@@ -3,7 +3,7 @@ from enum import Enum, Flag, unique
3
3
 
4
4
  @unique
5
5
  class Constants(Flag):
6
- TILE_SIZE = 256 # TODO: add tile size to metadata?
6
+ TILE_SIZE = 1024
7
7
 
8
8
  # Average https://noaa-wcsd-zarr-pds.s3.us-east-1.amazonaws.com/level_2/Henry_B._Bigelow/HB0902/EK60/HB0902.zarr/time/927
9
9
  # chunk size is ~1.3 kB, HB0902 cruise takes ~30 seconds to load all time/lat/lon data
@@ -1,14 +1,14 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: water_column_sonar_processing
3
- Version: 25.1.2
4
- Summary: A processing tool for water column sonar data.
3
+ Version: 25.1.4
4
+ Summary: Processing tool for water column sonar data.
5
5
  Author-email: Rudy Klucik <rudy.klucik@noaa.gov>
6
6
  Project-URL: Homepage, https://github.com/CI-CMG/water-column-sonar-processing
7
7
  Project-URL: Issues, https://github.com/CI-CMG/water-column-sonar-processing/issues
8
8
  Classifier: Programming Language :: Python :: 3
9
9
  Classifier: License :: OSI Approved :: MIT License
10
10
  Classifier: Operating System :: OS Independent
11
- Requires-Python: >=3.8
11
+ Requires-Python: >=3.10
12
12
  Description-Content-Type: text/markdown
13
13
  License-File: LICENSE
14
14
  Requires-Dist: aiobotocore==2.19.0
@@ -34,6 +34,19 @@ Requires-Dist: typing-extensions==4.10.0
34
34
  Requires-Dist: xarray==2024.10.0
35
35
  Requires-Dist: xbatcher==0.4.0
36
36
  Requires-Dist: zarr==2.18.3
37
+ Provides-Extra: dev
38
+ Requires-Dist: bandit[toml]==1.8.0; extra == "dev"
39
+ Requires-Dist: build; extra == "dev"
40
+ Requires-Dist: pre-commit; extra == "dev"
41
+ Requires-Dist: pyinstaller; extra == "dev"
42
+ Requires-Dist: twine; extra == "dev"
43
+ Requires-Dist: flake8==7.1.1; extra == "dev"
44
+ Requires-Dist: pooch==1.8.2; extra == "dev"
45
+ Requires-Dist: pytest~=8.3.3; extra == "dev"
46
+ Requires-Dist: tqdm; extra == "dev"
47
+ Requires-Dist: bandit; extra == "dev"
48
+ Provides-Extra: test
49
+ Requires-Dist: pytest-cov; extra == "test"
37
50
 
38
51
  # Water Column Sonar Processing
39
52
  Processing tool for converting L0 data to L1 and L2 as well as generating geospatial information
@@ -80,14 +93,17 @@ Processing tool for converting L0 data to L1 and L2 as well as generating geospa
80
93
  3. Set interpreter
81
94
 
82
95
  # Installing Dependencies
83
-
84
- 1. Add dependencies with versions to requirements.txt
85
- 2. ```pip install --upgrade pip && pip install -r requirements_dev.txt```
96
+ ```
97
+ uv pip install --upgrade pip
98
+ #uv pip install -r requirements_dev.txt
99
+ uv pip install -r pyproject.toml --extra dev
100
+ ```
86
101
 
87
102
 
88
103
  # Pytest
89
104
  ```commandline
90
- pytest --disable-warnings
105
+ uv run pytest tests
106
+ #pytest --disable-warnings
91
107
  ```
92
108
  or
93
109
  > pytest --cache-clear --cov=src tests/ --cov-report=xml
@@ -120,7 +136,7 @@ https://colab.research.google.com/drive/1KiLMueXiz9WVB9o4RuzYeGjNZ6PsZU7a#scroll
120
136
  # Tag a Release
121
137
  Step 1 --> increment the semantic version in the zarr_manager.py "metadata" & the "pyproject.toml"
122
138
  ```commandline
123
- git tag -a v25.1.2 -m "Releasing version v25.1.2"
139
+ git tag -a v25.1.4 -m "Releasing version v25.1.4"
124
140
  git push origin --tags
125
141
  ```
126
142
 
@@ -1,15 +1,15 @@
1
1
  water_column_sonar_processing/__init__.py,sha256=fvRK4uFo_A0l7w_T4yckvDqJ3wMUq4JB3VVPXqWfewE,226
2
2
  water_column_sonar_processing/process.py,sha256=-yQtK3rnZq6lGAr3q02zLDe1NuMH9c0PiUOxKzG_r18,5386
3
3
  water_column_sonar_processing/aws/__init__.py,sha256=KJqK8oYMn-u8n8i-Jp_lG5BvCOTjwWSjWP8yAyDlWVo,297
4
- water_column_sonar_processing/aws/dynamodb_manager.py,sha256=-OARhOcvfctpPTuQQAqZ6wb4NHrqFjyVpe31CZyyE6c,11631
4
+ water_column_sonar_processing/aws/dynamodb_manager.py,sha256=htP4Y2rmOSFtdzUFrgK14Bn-UXAFG22Ow-dDrR2alSw,13949
5
5
  water_column_sonar_processing/aws/s3_manager.py,sha256=-PCiW7YF31nGIPa1oVOVTzjTSExAAkT_IyNNnvWv2HU,16214
6
- water_column_sonar_processing/aws/s3fs_manager.py,sha256=Vo-DXj6vgb8t1l4LdtNu7JCtq_RfFsnl33RuGeBUXhk,2561
6
+ water_column_sonar_processing/aws/s3fs_manager.py,sha256=oouzV9DZLplPC6vzbouWPzyfyNPABx_LGxRGJGc1vWg,2563
7
7
  water_column_sonar_processing/aws/sns_manager.py,sha256=Dp9avG5VSugSWPR1dZ-askuAw1fCZkNUHbOUP65iR-k,1867
8
8
  water_column_sonar_processing/aws/sqs_manager.py,sha256=NSUrWmnSC8h8Gf7gT0U8zFaQQ-yX89h0Q0mDLKGqp2Y,1597
9
9
  water_column_sonar_processing/cruise/__init__.py,sha256=H5hW0JMORuaFvQk_R31B4VL8RnRyKeanOOiWmqEMZJk,156
10
10
  water_column_sonar_processing/cruise/create_empty_zarr_store.py,sha256=ZsFQTDA0gXfQHlxDsXBGD1qQ0ipmx4kS81DcY6ml5Ew,7767
11
11
  water_column_sonar_processing/cruise/datatree_manager.py,sha256=Qy4dZCW8_q31lbjxbMsx3JtBS4BvQT17_2P0QD1RQcY,639
12
- water_column_sonar_processing/cruise/resample_regrid.py,sha256=gz_uP-mBD4JSBRBr69ZvsfmXX4yyBdRG9-P1z3If43E,14246
12
+ water_column_sonar_processing/cruise/resample_regrid.py,sha256=wnog-qwFXRH20AYRD_3BXUgP-TN5ZnyPHpFws3527Mk,14533
13
13
  water_column_sonar_processing/geometry/__init__.py,sha256=GIzzc-_7pwEwbOkGpc4i_fmjWI5ymllXqzdHq_d3Rio,299
14
14
  water_column_sonar_processing/geometry/elevation_manager.py,sha256=eq9w691WJknPwWYkvO3giKTPleIxCVc2tMGR0e8ZRxQ,4267
15
15
  water_column_sonar_processing/geometry/geometry_manager.py,sha256=nz5T1vCDWHYIfQ853EqKYHDetTul7jRWS3y8Evep8QU,10855
@@ -18,17 +18,17 @@ water_column_sonar_processing/geometry/pmtile_generation.py,sha256=7Lm08Jr6YaM4n
18
18
  water_column_sonar_processing/index/__init__.py,sha256=izEObsKiOoIJ0kZCFhvaYsBd6Ga71XJxnogjrNInw68,68
19
19
  water_column_sonar_processing/index/index_manager.py,sha256=qsS6rKObJlFXKyzRuT1bk2_qW1YagW-Fg_AkQ1U_KRs,14213
20
20
  water_column_sonar_processing/model/__init__.py,sha256=FXaCdbPqxp0ogmZm9NplRirqpgMiYs1iRYgJbFbbX2Y,65
21
- water_column_sonar_processing/model/zarr_manager.py,sha256=LlpmUPUoVgNknVPpWFnMoYR5XmDbFDkdXCPJoOipfr4,15505
21
+ water_column_sonar_processing/model/zarr_manager.py,sha256=wAO_8jcKIEonTNH12Fzzdirz0XLS7qgwOJGrmcVALR8,15678
22
22
  water_column_sonar_processing/processing/__init__.py,sha256=tdpSfwnY6lbAS_yBTu4aG0SjPgCKqh6LAFvIj_t3j3U,168
23
23
  water_column_sonar_processing/processing/batch_downloader.py,sha256=qXoruHdbgzAolmroK6eRn9bWgeHFgaVQLwhJ6X5oHRE,6299
24
24
  water_column_sonar_processing/processing/raw_to_zarr.py,sha256=Sn0_zBT7yYP6abbSTlQBPA6iZSBxeVqPYYSgoroiBEU,17599
25
25
  water_column_sonar_processing/utility/__init__.py,sha256=yDObMOL0_OxKWet5wffK2-XVJgoE9iwiY2q04GZrtBQ,234
26
26
  water_column_sonar_processing/utility/cleaner.py,sha256=bNbs-hopWxtKAFBK0Eu18xdRErZCGZvtla3j-1bTwQw,619
27
- water_column_sonar_processing/utility/constants.py,sha256=AD6RlDrJRVN1GYwRvo7cunLhrdC0F8CyOlbkB_GxL-s,2180
27
+ water_column_sonar_processing/utility/constants.py,sha256=UtzFkvH5VE7eb8PzeKUDVt-nX6SOdlHtkul1zycF_Z0,2146
28
28
  water_column_sonar_processing/utility/pipeline_status.py,sha256=O-0SySqdRGJ6bs3zQe1NV9vkOpmsRM7zj5QoHgzYioY,4395
29
29
  water_column_sonar_processing/utility/timestamp.py,sha256=bO0oir7KxxoEHPGRkz9FCBfOligkocUyRiWRzAq8fnU,361
30
- water_column_sonar_processing-25.1.2.dist-info/LICENSE,sha256=lz4IpJ5_adG3S0ali-WaIpQFVTnEAOucMDQPECUVEYw,1110
31
- water_column_sonar_processing-25.1.2.dist-info/METADATA,sha256=BkcPnxcolpi8A3smtLG-nb9wBQwC52fx2bAqfnBHFiY,5448
32
- water_column_sonar_processing-25.1.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
33
- water_column_sonar_processing-25.1.2.dist-info/top_level.txt,sha256=aRYU4A7RNBlNrL4vzjytFAir3BNnmOgsvIGKKA36tg4,30
34
- water_column_sonar_processing-25.1.2.dist-info/RECORD,,
30
+ water_column_sonar_processing-25.1.4.dist-info/LICENSE,sha256=lz4IpJ5_adG3S0ali-WaIpQFVTnEAOucMDQPECUVEYw,1110
31
+ water_column_sonar_processing-25.1.4.dist-info/METADATA,sha256=vI9oh0A7AwlM3jQSKWNw3CdPR_yz4yHiynYuKbzNYH0,5959
32
+ water_column_sonar_processing-25.1.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
33
+ water_column_sonar_processing-25.1.4.dist-info/top_level.txt,sha256=aRYU4A7RNBlNrL4vzjytFAir3BNnmOgsvIGKKA36tg4,30
34
+ water_column_sonar_processing-25.1.4.dist-info/RECORD,,