anemoi-utils 0.4.3__py3-none-any.whl → 0.4.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,328 @@
+ # (C) Copyright 2024 European Centre for Medium-Range Weather Forecasts.
+ # This software is licensed under the terms of the Apache Licence Version 2.0
+ # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+ # In applying this licence, ECMWF does not waive the privileges and immunities
+ # granted to it by virtue of its status as an intergovernmental organisation
+ # nor does it submit to any jurisdiction.
+
+ import concurrent.futures
+ import logging
+ import os
+ import shutil
+ from abc import abstractmethod
+
+ import tqdm
+
+ from ..humanize import bytes_to_human
+
+ LOGGER = logging.getLogger(__name__)
+
+
+ def _ignore(number_of_files, total_size, total_transferred, transferring):
+     pass
+
+
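The `_ignore` stub above also documents the `progress` callback protocol: it is called with the number of files found, their total size in bytes, the bytes transferred so far, and a flag that is `False` while the file list is still being enumerated and `True` once the transfer is under way (see `transfer_folder` below). A minimal custom callback could look like this (a sketch, not part of the package)::

    def print_progress(number_of_files, total_size, total_transferred, transferring):
        if not transferring:
            # the file list is still being built; nothing has been copied yet
            print(f"Scanning... {number_of_files:,} files found so far")
        elif total_size:
            percent = 100 * total_transferred / total_size
            print(f"{total_transferred:,}/{total_size:,} bytes ({percent:.1f}%)")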
+ class Loader:
+
+     def transfer_folder(self, *, source, target, overwrite=False, resume=False, verbosity=1, threads=1, progress=None):
+         assert verbosity == 1, verbosity
+
+         if progress is None:
+             progress = _ignore
+
+         # from boto3.s3.transfer import TransferConfig
+         # config = TransferConfig(use_threads=False)
+         config = None
+         with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
+             try:
+                 if verbosity > 0:
+                     LOGGER.info(f"{self.action} {source} to {target}")
+
+                 total_size = 0
+                 total_transferred = 0
+
+                 futures = []
+                 for name in self.list_source(source):
+
+                     futures.append(
+                         executor.submit(
+                             self.transfer_file,
+                             source=self.source_path(name, source),
+                             target=self.target_path(name, source, target),
+                             overwrite=overwrite,
+                             resume=resume,
+                             verbosity=verbosity - 1,
+                             config=config,
+                         )
+                     )
+                     total_size += self.source_size(name)
+
+                     if len(futures) % 10000 == 0:
+
+                         progress(len(futures), total_size, 0, False)
+
+                         if verbosity > 0:
+                             LOGGER.info(f"Preparing transfer, {len(futures):,} files... ({bytes_to_human(total_size)})")
+                             done, _ = concurrent.futures.wait(
+                                 futures,
+                                 timeout=0.001,
+                                 return_when=concurrent.futures.FIRST_EXCEPTION,
+                             )
+                             # Trigger exceptions if any
+                             for future in done:
+                                 future.result()
+
+                 number_of_files = len(futures)
+                 progress(number_of_files, total_size, 0, True)
+
+                 if verbosity > 0:
+                     LOGGER.info(f"{self.action} {number_of_files:,} files ({bytes_to_human(total_size)})")
+                     with tqdm.tqdm(total=total_size, unit="B", unit_scale=True, unit_divisor=1024) as pbar:
+                         for future in concurrent.futures.as_completed(futures):
+                             size = future.result()
+                             pbar.update(size)
+                             total_transferred += size
+                             progress(number_of_files, total_size, total_transferred, True)
+                 else:
+                     for future in concurrent.futures.as_completed(futures):
+                         size = future.result()
+                         total_transferred += size
+                         progress(number_of_files, total_size, total_transferred, True)
+
+             except Exception:
+                 executor.shutdown(wait=False, cancel_futures=True)
+                 raise
+
+     def transfer_file(self, source, target, overwrite, resume, verbosity, threads=1, progress=None, config=None):
+         try:
+             return self._transfer_file(source, target, overwrite, resume, verbosity, threads=threads, config=config)
+         except Exception as e:
+             LOGGER.exception(f"Error transferring {source} to {target}")
+             LOGGER.error(e)
+             raise
+
+     @abstractmethod
+     def list_source(self, source):
+         raise NotImplementedError
+
+     @abstractmethod
+     def source_path(self, local_path, source):
+         raise NotImplementedError
+
+     @abstractmethod
+     def target_path(self, source_path, source, target):
+         raise NotImplementedError
+
+     @abstractmethod
+     def source_size(self, local_path):
+         raise NotImplementedError
+
+     @abstractmethod
+     def copy(self, source, target, **kwargs):
+         raise NotImplementedError
+
+     @abstractmethod
+     def get_temporary_target(self, target, pattern):
+         raise NotImplementedError
+
+     @abstractmethod
+     def rename_target(self, target, temporary_target):
+         raise NotImplementedError
+
+
+ class BaseDownload(Loader):
+     action = "Downloading"
+
+     @abstractmethod
+     def copy(self, source, target, **kwargs):
+         raise NotImplementedError
+
+     def get_temporary_target(self, target, pattern):
+         dirname, basename = os.path.split(target)
+         return pattern.format(dirname=dirname, basename=basename)
+
+     def rename_target(self, target, new_target):
+         os.rename(target, new_target)
+
+     def delete_target(self, target):
+         if os.path.exists(target):
+             shutil.rmtree(target)
+
+
+ class BaseUpload(Loader):
+     action = "Uploading"
+
+     def copy(self, source, target, **kwargs):
+         if os.path.isdir(source):
+             self.transfer_folder(source=source, target=target, **kwargs)
+         else:
+             self.transfer_file(source=source, target=target, **kwargs)
+
+     def list_source(self, source):
+         for root, _, files in os.walk(source):
+             for file in files:
+                 yield os.path.join(root, file)
+
+     def source_path(self, local_path, source):
+         return local_path
+
+     def target_path(self, source_path, source, target):
+         relative_path = os.path.relpath(source_path, source)
+         path = os.path.join(target, relative_path)
+         return path
+
+     def source_size(self, local_path):
+         return os.path.getsize(local_path)
+
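For uploads, `list_source` walks the local tree and `target_path` re-creates the same layout below the target prefix. A quick illustration of the mapping (all paths are hypothetical)::

    import os

    source = "/data/experiment"
    target = "s3://my-bucket/experiment"
    local_file = "/data/experiment/fields/2t.grib"

    relative_path = os.path.relpath(local_file, source)  # "fields/2t.grib"
    print(os.path.join(target, relative_path))
    # s3://my-bucket/experiment/fields/2t.grib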
+
+ class TransferMethodNotImplementedError(NotImplementedError):
+     pass
+
+
+ class Transfer:
+     """This is the internal API and should not be used directly. Use the transfer function instead."""
+
+     TransferMethodNotImplementedError = TransferMethodNotImplementedError
+
+     def __init__(
+         self,
+         source,
+         target,
+         overwrite=False,
+         resume=False,
+         verbosity=1,
+         threads=1,
+         progress=None,
+         temporary_target=False,
+     ):
+         if target == ".":
+             target = os.path.basename(source)
+
+         temporary_target = {
+             False: "{dirname}/{basename}",
+             True: "{dirname}-downloading/{basename}",
+             "-tmp/*": "{dirname}-tmp/{basename}",
+             "*-tmp": "{dirname}/{basename}-tmp",
+             "tmp-*": "{dirname}/tmp-{basename}",
+         }.get(temporary_target, temporary_target)
+         assert isinstance(temporary_target, str), (type(temporary_target), temporary_target)
+
+         self.source = source
+         self.target = target
+         self.overwrite = overwrite
+         self.resume = resume
+         self.verbosity = verbosity
+         self.threads = threads
+         self.progress = progress
+         self.temporary_target = temporary_target
+
+         cls = _find_transfer_class(self.source, self.target)
+         self.loader = cls()
+
+     def run(self):
+
+         target = self.loader.get_temporary_target(self.target, self.temporary_target)
+         if target != self.target:
+             LOGGER.info(f"Using temporary target {target} to copy to {self.target}")
+
+         if self.overwrite:
+             # delete the target if it exists
+             LOGGER.info(f"Deleting {self.target}")
+             self.delete_target(self.target)
+
+             # carefully delete the temporary target if it exists
+             head, tail = os.path.split(self.target)
+             head_, tail_ = os.path.split(target)
+             if not head_.startswith(head) or tail not in tail_:
+                 LOGGER.info(f"{target} is too different from {self.target} to delete it automatically.")
+             else:
+                 self.delete_target(target)
+
+         self.loader.copy(
+             self.source,
+             target,
+             overwrite=self.overwrite,
+             resume=self.resume,
+             verbosity=self.verbosity,
+             threads=self.threads,
+             progress=self.progress,
+         )
+
+         self.rename_target(target, self.target)
+
+         return self
+
+     def rename_target(self, target, new_target):
+         if target != new_target:
+             LOGGER.info(f"Renaming temporary target {target} to {new_target}")
+             return self.loader.rename_target(target, new_target)
+
+     def delete_target(self, target):
+         return self.loader.delete_target(target)
+
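The `temporary_target` patterns are expanded with the directory and base name of the final target (see `BaseDownload.get_temporary_target`). For a hypothetical target of `/data/out.zarr`, the built-in patterns produce::

    pattern = "{dirname}-downloading/{basename}"  # temporary_target=True
    print(pattern.format(dirname="/data", basename="out.zarr"))
    # /data-downloading/out.zarr

    # "-tmp/*" -> /data-tmp/out.zarr
    # "*-tmp"  -> /data/out.zarr-tmp
    # "tmp-*"  -> /data/tmp-out.zarr
    # False    -> /data/out.zarr (no temporary location)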
+
+ def _find_transfer_class(source, target):
+     from_ssh = source.startswith("ssh://")
+     into_ssh = target.startswith("ssh://")
+
+     from_s3 = source.startswith("s3://")
+     into_s3 = target.startswith("s3://")
+
+     from_local = not from_ssh and not from_s3
+     into_local = not into_ssh and not into_s3
+
+     # check that exactly one source type and one target type is specified
+     assert sum([into_ssh, into_local, into_s3]) == 1, (into_ssh, into_local, into_s3)
+     assert sum([from_ssh, from_local, from_s3]) == 1, (from_ssh, from_local, from_s3)
+
+     if from_local and into_ssh:  # local -> ssh
+         from .ssh import RsyncUpload
+
+         return RsyncUpload
+
+     if from_s3 and into_local:  # local <- S3
+         from .s3 import S3Download
+
+         return S3Download
+
+     if from_local and into_s3:  # local -> S3
+         from .s3 import S3Upload
+
+         return S3Upload
+
+     raise TransferMethodNotImplementedError(f"Transfer from {source} to {target} is not implemented")
+
+
+ # this is the public API
+ def transfer(*args, **kwargs) -> "Transfer":
+     """Transfer a file or a folder between the local file system, S3 and SSH remote hosts.
+
+     Parameters
+     ----------
+     source : str
+         A path to a local file or folder, or a URL to a file or a folder on S3.
+         The URL should start with 's3://'.
+     target : str
+         A path to a local file or folder, or a URL to a file or a folder on S3 or a remote folder.
+         The URL should start with 's3://' or 'ssh://'.
+     overwrite : bool, optional
+         If the data is already in the target location, it will be overwritten.
+         By default False.
+     resume : bool, optional
+         If the data is already on S3 it will not be uploaded, unless the remote file has a different size.
+         Ignored if the target is an SSH remote folder (ssh://).
+         By default False.
+     verbosity : int, optional
+         The level of verbosity, by default 1.
+     progress : callable, optional
+         A callable that will be called with the number of files, the total size of the files, the total size
+         transferred and a boolean indicating if the transfer has started. By default None.
+     threads : int, optional
+         The number of threads to use when uploading a directory, by default 1.
+     temporary_target : bool, optional
+         Experimental feature.
+         If True and if the target location supports it, the data will be uploaded to a temporary location,
+         then renamed to the final location. Supported by SSH and local targets, not supported by S3.
+         By default False.
+     """
+     copier = Transfer(*args, **kwargs)
+     copier.run()
+     return copier
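A short usage sketch of the public function (the bucket name and paths are hypothetical; the module path assumes the usual `anemoi.utils.remote` layout implied by the relative imports above)::

    from anemoi.utils.remote import transfer

    # upload a local folder, 4 files at a time, skipping files already on S3
    transfer("/data/experiment", "s3://my-bucket/experiment", resume=True, threads=4)

    # download a folder from S3 (the trailing "/" marks a folder)
    transfer("s3://my-bucket/experiment/", "/data/copy", overwrite=True)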
@@ -0,0 +1,386 @@
+ # (C) Copyright 2024 European Centre for Medium-Range Weather Forecasts.
+ # This software is licensed under the terms of the Apache Licence Version 2.0
+ # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+ # In applying this licence, ECMWF does not waive the privileges and immunities
+ # granted to it by virtue of its status as an intergovernmental organisation
+ # nor does it submit to any jurisdiction.
+
+ """This module provides functions to upload, download, list and delete files and folders on S3.
+ The functions of this package expect the AWS credentials to be set up in the environment,
+ typically by setting the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables or
+ by creating a `~/.aws/credentials` file. It is also possible to set the `endpoint_url` in the same file
+ to use a different S3 compatible service::
+
+     [default]
+     endpoint_url = https://some-storage.somewhere.world
+     aws_access_key_id = xxxxxxxxxxxxxxxxxxxxxxxx
+     aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxx
+
+ Alternatively, the `endpoint_url` and the keys can be set in one of
+ the `~/.config/anemoi/settings.toml`
+ or `~/.config/anemoi/settings-secrets.toml` files.
+
+ """
+
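Based on the `object-storage` section read by `s3_client` below, a hypothetical `~/.config/anemoi/settings.toml` could look like this (the endpoint and bucket name are placeholders, and the per-bucket sub-table assumes `load_config` returns nested dictionaries from TOML tables)::

    [object-storage]
    endpoint_url = "https://some-storage.somewhere.world"
    aws_access_key_id = "xxxxxxxxxxxxxxxxxxxxxxxx"
    aws_secret_access_key = "xxxxxxxxxxxxxxxxxxxxxxxx"

    # per-bucket overrides are read from a sub-table named after the bucket
    [object-storage.my-bucket]
    endpoint_url = "https://other-storage.example"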
+ import logging
+ import os
+ import threading
+ from copy import deepcopy
+ from typing import Iterable
+
+ import tqdm
+
+ from ..config import load_config
+ from ..humanize import bytes_to_human
+ from . import BaseDownload
+ from . import BaseUpload
+
+ LOGGER = logging.getLogger(__name__)
+
+
+ # s3_clients are not thread-safe, so we need to create a new client for each thread
+
+ thread_local = threading.local()
+
+
+ def s3_client(bucket, region=None):
+     import boto3
+     from botocore import UNSIGNED
+     from botocore.client import Config
+
+     if not hasattr(thread_local, "s3_clients"):
+         thread_local.s3_clients = {}
+
+     key = f"{bucket}-{region}"
+
+     if key in thread_local.s3_clients:
+         return thread_local.s3_clients[key]
+
+     boto3_config = dict(max_pool_connections=25)
+
+     if region:
+         # This is using AWS
+
+         options = {"region_name": region}
+
+         # Anonymous access
+         if not (
+             os.path.exists(os.path.expanduser("~/.aws/credentials"))
+             or ("AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ)
+         ):
+             boto3_config["signature_version"] = UNSIGNED
+
+     else:
+
+         # We may be accessing a different S3 compatible service
+         # Use anemoi.config to get the configuration
+
+         options = {}
+         config = load_config(secrets=["aws_access_key_id", "aws_secret_access_key"])
+
+         cfg = config.get("object-storage", {})
+         for k, v in cfg.items():
+             if isinstance(v, (str, int, float, bool)):
+                 options[k] = v
+
+         for k, v in cfg.get(bucket, {}).items():
+             if isinstance(v, (str, int, float, bool)):
+                 options[k] = v
+
+         type = options.pop("type", "s3")
+         if type != "s3":
+             raise ValueError(f"Unsupported object storage type {type}")
+
+         if "config" in options:
+             boto3_config.update(options["config"])
+             del options["config"]
+
+     options["config"] = Config(**boto3_config)
+
+     thread_local.s3_clients[key] = boto3.client("s3", **options)
+
+     return thread_local.s3_clients[key]
+
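Because the client cache lives in `threading.local()`, each worker thread lazily builds and reuses its own client. A minimal sketch of that pattern in isolation (illustrative only; `object()` stands in for `boto3.client`)::

    import threading
    from concurrent.futures import ThreadPoolExecutor

    local = threading.local()

    def get_client():
        # each thread sees its own `local`, so clients are never shared
        if not hasattr(local, "client"):
            local.client = object()  # stand-in for boto3.client("s3", ...)
        return local.client

    with ThreadPoolExecutor(max_workers=3) as ex:
        ids = set(ex.map(lambda _: id(get_client()), range(6)))
    print(len(ids))  # one client per worker thread that ran a task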
+
+ class S3Upload(BaseUpload):
+
+     def get_temporary_target(self, target, pattern):
+         return target
+
+     def rename_target(self, target, temporary_target):
+         pass
+
+     def delete_target(self, target):
+         pass
+         # delete(target)
+
+     def _transfer_file(self, source, target, overwrite, resume, verbosity, threads, config=None):
+
+         from botocore.exceptions import ClientError
+
+         assert target.startswith("s3://")
+
+         _, _, bucket, key = target.split("/", 3)
+         s3 = s3_client(bucket)
+
+         size = os.path.getsize(source)
+
+         if verbosity > 0:
+             LOGGER.info(f"{self.action} {source} to {target} ({bytes_to_human(size)})")
+
+         try:
+             results = s3.head_object(Bucket=bucket, Key=key)
+             remote_size = int(results["ContentLength"])
+         except ClientError as e:
+             if e.response["Error"]["Code"] != "404":
+                 raise
+             remote_size = None
+
+         if remote_size is not None:
+             if remote_size != size:
+                 LOGGER.warning(
+                     f"{target} already exists, but with different size, re-uploading (remote={remote_size}, local={size})"
+                 )
+             elif resume:
+                 # LOGGER.info(f"{target} already exists, skipping")
+                 return size
+
+         if remote_size is not None and not overwrite and not resume:
+             raise ValueError(f"{target} already exists, use 'overwrite' to replace or 'resume' to skip")
+
+         if verbosity > 0:
+             with tqdm.tqdm(total=size, unit="B", unit_scale=True, unit_divisor=1024, leave=False) as pbar:
+                 s3.upload_file(source, bucket, key, Callback=lambda x: pbar.update(x), Config=config)
+         else:
+             s3.upload_file(source, bucket, key, Config=config)
+
+         return size
+
+
+ class S3Download(BaseDownload):
+
+     def copy(self, source, target, **kwargs):
+         assert source.startswith("s3://")
+
+         if source.endswith("/"):
+             self.transfer_folder(source=source, target=target, **kwargs)
+         else:
+             self.transfer_file(source=source, target=target, **kwargs)
+
+     def list_source(self, source):
+         yield from _list_objects(source)
+
+     def source_path(self, s3_object, source):
+         _, _, bucket, _ = source.split("/", 3)
+         return f"s3://{bucket}/{s3_object['Key']}"
+
+     def target_path(self, s3_object, source, target):
+         _, _, _, folder = source.split("/", 3)
+         local_path = os.path.join(target, os.path.relpath(s3_object["Key"], folder))
+         os.makedirs(os.path.dirname(local_path), exist_ok=True)
+         return local_path
+
+     def source_size(self, s3_object):
+         return s3_object["Size"]
+
+     def _transfer_file(self, source, target, overwrite, resume, verbosity, threads, config=None):
+         # from boto3.s3.transfer import TransferConfig
+
+         _, _, bucket, key = source.split("/", 3)
+         s3 = s3_client(bucket)
+
+         try:
+             response = s3.head_object(Bucket=bucket, Key=key)
+         except s3.exceptions.ClientError as e:
+             if e.response["Error"]["Code"] == "404":
+                 raise ValueError(f"{source} does not exist ({bucket}, {key})")
+             raise
+
+         size = int(response["ContentLength"])
+
+         if verbosity > 0:
+             LOGGER.info(f"{self.action} {source} to {target} ({bytes_to_human(size)})")
+
+         if overwrite:
+             resume = False
+
+         if resume:
+             if os.path.exists(target):
+                 local_size = os.path.getsize(target)
+                 if local_size != size:
+                     LOGGER.warning(
+                         f"{target} already exists, but with different size, re-downloading (remote={size}, local={local_size})"
+                     )
+                 else:
+                     # if verbosity > 0:
+                     #     LOGGER.info(f"{target} already exists, skipping")
+                     return size
+
+         if os.path.exists(target) and not overwrite:
+             raise ValueError(f"{target} already exists, use 'overwrite' to replace or 'resume' to skip")
+
+         if verbosity > 0:
+             with tqdm.tqdm(total=size, unit="B", unit_scale=True, unit_divisor=1024, leave=False) as pbar:
+                 s3.download_file(bucket, key, target, Callback=lambda x: pbar.update(x), Config=config)
+         else:
+             s3.download_file(bucket, key, target, Config=config)
+
+         return size
+
+
+ def _list_objects(target, batch=False):
+     _, _, bucket, prefix = target.split("/", 3)
+     s3 = s3_client(bucket)
+
+     paginator = s3.get_paginator("list_objects_v2")
+
+     for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
+         if "Contents" in page:
+             objects = deepcopy(page["Contents"])
+             if batch:
+                 yield objects
+             else:
+                 yield from objects
+
+
+ def _delete_folder(target) -> None:
+     _, _, bucket, _ = target.split("/", 3)
+     s3 = s3_client(bucket)
+
+     total = 0
+     for batch in _list_objects(target, batch=True):
+         LOGGER.info(f"Deleting {len(batch):,} objects from {target}")
+         s3.delete_objects(Bucket=bucket, Delete={"Objects": [{"Key": o["Key"]} for o in batch]})
+         total += len(batch)
+         LOGGER.info(f"Deleted {len(batch):,} objects (total={total:,})")
+
+
+ def _delete_file(target) -> None:
+     from botocore.exceptions import ClientError
+
+     _, _, bucket, key = target.split("/", 3)
+     s3 = s3_client(bucket)
+
+     try:
+         s3.head_object(Bucket=bucket, Key=key)
+         exists = True
+     except ClientError as e:
+         if e.response["Error"]["Code"] != "404":
+             raise
+         exists = False
+
+     if not exists:
+         LOGGER.warning(f"{target} does not exist. Did you mean to delete a folder? Then add a trailing '/'")
+         return
+
+     LOGGER.info(f"Deleting {target}")
+     s3.delete_object(Bucket=bucket, Key=key)
+     LOGGER.info(f"{target} is deleted")
+
+
+ def delete(target) -> None:
+     """Delete a file or a folder from S3.
+
+     Parameters
+     ----------
+     target : str
+         The URL of a file or a folder on S3. The URL should start with 's3://'. If the URL ends with a '/' it is
+         assumed to be a folder, otherwise it is assumed to be a file.
+     """
+
+     assert target.startswith("s3://")
+
+     if target.endswith("/"):
+         _delete_folder(target)
+     else:
+         _delete_file(target)
+
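Usage sketch (the bucket and keys are placeholders)::

    delete("s3://my-bucket/old-run/")      # trailing "/": delete every object under the prefix
    delete("s3://my-bucket/old-run.zarr")  # no trailing "/": delete a single object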
+
+ def list_folder(folder) -> Iterable:
+     """List the sub folders in a folder on S3.
+
+     Parameters
+     ----------
+     folder : str
+         The URL of a folder on S3. The URL should start with 's3://'.
+
+     Returns
+     -------
+     Iterable
+         An iterable of the subfolder names in the folder.
+     """
+
+     assert folder.startswith("s3://")
+     if not folder.endswith("/"):
+         folder += "/"
+
+     _, _, bucket, prefix = folder.split("/", 3)
+
+     s3 = s3_client(bucket)
+     paginator = s3.get_paginator("list_objects_v2")
+
+     for page in paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter="/"):
+         if "CommonPrefixes" in page:
+             yield from [folder + _["Prefix"] for _ in page.get("CommonPrefixes")]
+
+
+ def object_info(target) -> dict:
+     """Get information about an object on S3.
+
+     Parameters
+     ----------
+     target : str
+         The URL of a file or a folder on S3. The URL should start with 's3://'.
+
+     Returns
+     -------
+     dict
+         A dictionary with information about the object.
+     """
+
+     _, _, bucket, key = target.split("/", 3)
+     s3 = s3_client(bucket)
+
+     try:
+         return s3.head_object(Bucket=bucket, Key=key)
+     except s3.exceptions.ClientError as e:
+         if e.response["Error"]["Code"] == "404":
+             raise ValueError(f"{target} does not exist")
+         raise
+
+
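The returned dictionary is the raw `head_object` response, so the standard S3 metadata fields are available (the object URL below is hypothetical)::

    info = object_info("s3://my-bucket/data.grib")
    print(info["ContentLength"])   # size in bytes
    print(info["LastModified"])    # datetime of the last modification
    print(info["ETag"])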
+ def object_acl(target) -> dict:
+     """Get information about an object's ACL on S3.
+
+     Parameters
+     ----------
+     target : str
+         The URL of a file or a folder on S3. The URL should start with 's3://'.
+
+     Returns
+     -------
+     dict
+         A dictionary with information about the object's ACL.
+     """
+
+     _, _, bucket, key = target.split("/", 3)
+     s3 = s3_client(bucket)
+
+     return s3.get_object_acl(Bucket=bucket, Key=key)
+
+
+ def download(source, target, *args, **kwargs):
+     from . import transfer
+
+     assert source.startswith("s3://"), f"source {source} should start with 's3://'"
+     return transfer(source, target, *args, **kwargs)
+
+
+ def upload(source, target, *args, **kwargs):
+     from . import transfer
+
+     assert target.startswith("s3://"), f"target {target} should start with 's3://'"
+     return transfer(source, target, *args, **kwargs)
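These two wrappers just validate the URL scheme and delegate to `transfer`, so they accept the same keyword arguments; a usage sketch (paths and bucket are hypothetical, and the module path assumes the usual `anemoi.utils.remote.s3` layout)::

    from anemoi.utils.remote.s3 import download, upload

    download("s3://my-bucket/dataset/", "/tmp/dataset", overwrite=True)
    upload("/tmp/results", "s3://my-bucket/results", threads=4, resume=True)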