skypilot-nightly 1.0.0.dev20250331__py3-none-any.whl → 1.0.0.dev20250402__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sky/__init__.py +2 -2
- sky/adaptors/kubernetes.py +7 -0
- sky/backends/cloud_vm_ray_backend.py +58 -13
- sky/check.py +2 -2
- sky/cli.py +7 -3
- sky/client/cli.py +7 -3
- sky/cloud_stores.py +8 -10
- sky/data/data_utils.py +178 -90
- sky/data/mounting_utils.py +79 -22
- sky/data/storage.py +95 -30
- sky/global_user_state.py +5 -0
- sky/models.py +4 -1
- sky/server/requests/payloads.py +4 -4
- sky/server/server.py +8 -1
- sky/skylet/constants.py +8 -2
- sky/task.py +2 -2
- sky/utils/controller_utils.py +2 -2
- sky/utils/kubernetes/gpu_labeler.py +35 -42
- {skypilot_nightly-1.0.0.dev20250331.dist-info → skypilot_nightly-1.0.0.dev20250402.dist-info}/METADATA +1 -5
- {skypilot_nightly-1.0.0.dev20250331.dist-info → skypilot_nightly-1.0.0.dev20250402.dist-info}/RECORD +24 -24
- {skypilot_nightly-1.0.0.dev20250331.dist-info → skypilot_nightly-1.0.0.dev20250402.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20250331.dist-info → skypilot_nightly-1.0.0.dev20250402.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20250331.dist-info → skypilot_nightly-1.0.0.dev20250402.dist-info}/licenses/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20250331.dist-info → skypilot_nightly-1.0.0.dev20250402.dist-info}/top_level.txt +0 -0
sky/__init__.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Optional
 import urllib.request

 # Replaced with the current commit when building the wheels.
-_SKYPILOT_COMMIT_SHA = '
+_SKYPILOT_COMMIT_SHA = '515e5df7c14cfd425f606de20d4b96d6a897ee7d'


 def _get_git_commit():
@@ -35,7 +35,7 @@ def _get_git_commit():


 __commit__ = _get_git_commit()
-__version__ = '1.0.0.dev20250331'
+__version__ = '1.0.0.dev20250402'
 __root_dir__ = os.path.dirname(os.path.abspath(__file__))

sky/adaptors/kubernetes.py
CHANGED
@@ -146,6 +146,13 @@ def apps_api(context: Optional[str] = None):
     return kubernetes.client.AppsV1Api()


+@_api_logging_decorator('urllib3', logging.ERROR)
+@annotations.lru_cache(scope='request')
+def batch_api(context: Optional[str] = None):
+    _load_config(context)
+    return kubernetes.client.BatchV1Api()
+
+
 @_api_logging_decorator('urllib3', logging.ERROR)
 @annotations.lru_cache(scope='request')
 def api_client(context: Optional[str] = None):
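
Note: the new batch_api() adaptor mirrors the existing apps_api() pattern, returning a per-request-cached kubernetes.client.BatchV1Api handle. A minimal usage sketch (the context and namespace names here are hypothetical):

    from sky.adaptors import kubernetes

    # List Kubernetes Jobs in a namespace via the new cached BatchV1Api handle.
    batch = kubernetes.batch_api(context='my-context')  # context is optional
    jobs = batch.list_namespaced_job(namespace='default')
    for job in jobs.items:
        print(job.metadata.name, job.status.succeeded)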
sky/backends/cloud_vm_ray_backend.py
CHANGED
@@ -61,6 +61,7 @@ from sky.utils import command_runner
 from sky.utils import common
 from sky.utils import common_utils
 from sky.utils import controller_utils
+from sky.utils import env_options
 from sky.utils import log_utils
 from sky.utils import message_utils
 from sky.utils import registry
@@ -622,13 +623,39 @@ class RayCodeGen:
         options_str = ', '.join(options)
         logger.debug('Added Task with options: '
                      f'{options_str}')
+        # Script to block completion of a job until all storage mounted with
+        # CACHED_MOUNT mode is uploaded to remote.
+        rclone_flush_script = textwrap.dedent(f"""\
+
+            if [ $(findmnt -t fuse.rclone --noheading | wc -l) -gt 0 ]; then
+                flushed=0
+                # extra second on top of --vfs-cache-poll-interval to
+                # avoid race condition between rclone log line creation and this check.
+                sleep 1
+                while [ $flushed -eq 0 ]; do
+                    # sleep for the same interval as --vfs-cache-poll-interval
+                    sleep {constants.RCLONE_CACHE_REFRESH_INTERVAL}
+                    flushed=1
+                    for file in {constants.RCLONE_LOG_DIR}/*; do
+                        exitcode=0
+                        tac $file | grep "vfs cache: cleaned:" -m 1 | grep "in use 0, to upload 0, uploading 0" -q || exitcode=$?
+                        if [ $exitcode -ne 0 ]; then
+                            echo "skypilot: cached mount is still uploading to remote"
+                            flushed=0
+                            break
+                        fi
+                    done
+                done
+                echo "skypilot: cached mount uploaded complete"
+            fi""")
         self._code += [
             sky_env_vars_dict_str,
             textwrap.dedent(f"""\
         script = {bash_script!r}
+        rclone_flush_script = {rclone_flush_script!r}
         if run_fn is not None:
            script = run_fn({gang_scheduling_id}, gang_scheduling_id_to_ip)
-
+        script += rclone_flush_script

        if script is not None:
            sky_env_vars_dict['{constants.SKYPILOT_NUM_GPUS_PER_NODE}'] = {int(math.ceil(num_gpus))!r}
@@ -4869,7 +4896,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
         # Handle cases where `storage_mounts` is None. This occurs when users
         # initiate a 'sky start' command from a Skypilot version that predates
         # the introduction of the `storage_mounts_metadata` feature.
-        if
+        if storage_mounts is None:
             return

         # Process only mount mode objects here. COPY mode objects have been
@@ -4878,10 +4905,11 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
         storage_mounts = {
             path: storage_mount
             for path, storage_mount in storage_mounts.items()
-            if storage_mount.mode
+            if storage_mount.mode in storage_lib.MOUNTABLE_STORAGE_MODES
         }

-        # Handle cases when there aren't any Storages with MOUNT
+        # Handle cases when there aren't any Storages with either MOUNT or
+        # MOUNT_CACHED mode.
         if not storage_mounts:
             return
         start = time.time()
@@ -4911,7 +4939,13 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
             # Get the first store and use it to mount
             store = list(storage_obj.stores.values())[0]
             assert store is not None, storage_obj
-
+            if storage_obj.mode == storage_lib.StorageMode.MOUNT:
+                mount_cmd = store.mount_command(dst)
+                action_message = 'Mounting'
+            else:
+                assert storage_obj.mode == storage_lib.StorageMode.MOUNT_CACHED
+                mount_cmd = store.mount_cached_command(dst)
+                action_message = 'Mounting cached mode'
             src_print = (storage_obj.source
                          if storage_obj.source else storage_obj.name)
             if isinstance(src_print, list):
@@ -4923,7 +4957,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                 target=dst,
                 cmd=mount_cmd,
                 run_rsync=False,
-                action_message=
+                action_message=action_message,
                 log_path=log_path,
                 # Need to source bashrc, as the cloud specific CLI or SDK
                 # may require PATH in bashrc.
@@ -4942,12 +4976,23 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                     f' to an empty or non-existent path.')
                 raise RuntimeError(error_msg) from None
             else:
-                #
-
-
-
-
+                # By default, raising an error caused from mounting_utils
+                # shows a big heredoc as part of it. Here, we want to
+                # conditionally show the heredoc only if SKYPILOT_DEBUG
+                # is set
+                if env_options.Options.SHOW_DEBUG_INFO.get():
+                    raise exceptions.CommandError(
+                        e.returncode,
+                        command='to mount',
+                        error_msg=e.error_msg,
+                        detailed_reason=e.detailed_reason)
+                else:
+                    # Strip the command (a big heredoc) from the exception
+                    raise exceptions.CommandError(
+                        e.returncode,
+                        command='to mount',
+                        error_msg=e.error_msg,
+                        detailed_reason=e.detailed_reason) from None

         end = time.time()
         logger.debug(f'Storage mount sync took {end - start} seconds.')
@@ -4965,7 +5010,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
             return
         storage_mounts_metadata = {}
         for dst, storage_obj in storage_mounts.items():
-            if storage_obj.mode
+            if storage_obj.mode not in storage_lib.MOUNTABLE_STORAGE_MODES:
                 # Skip non-mount storage objects, as there is no need to
                 # reconstruct them during cluster restart.
                 continue
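
Note: the rclone_flush_script above holds a job open until every MOUNT_CACHED storage has finished uploading, by scanning each rclone log for a final "vfs cache: cleaned:" line reporting "in use 0, to upload 0, uploading 0". The same check, sketched in Python (the directory and interval below are illustrative stand-ins for constants.RCLONE_LOG_DIR and constants.RCLONE_CACHE_REFRESH_INTERVAL):

    import glob
    import time

    RCLONE_LOG_DIR = '/tmp/rclone_log'  # stand-in path
    POLL_INTERVAL = 10  # seconds; stand-in interval

    def wait_for_cached_mounts_to_flush() -> None:
        """Block until all rclone logs report an empty VFS upload queue."""
        while True:
            time.sleep(POLL_INTERVAL)
            for log_file in glob.glob(f'{RCLONE_LOG_DIR}/*'):
                with open(log_file, encoding='utf-8') as f:
                    # Latest "cleaned" status line, like `tac $file | grep -m 1`.
                    cleaned = [l for l in f if 'vfs cache: cleaned:' in l]
                if not cleaned or ('in use 0, to upload 0, uploading 0'
                                   not in cleaned[-1]):
                    break  # this mount is still uploading; poll again
            else:
                return  # every mount reported an empty upload queue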
sky/check.py
CHANGED
@@ -359,9 +359,9 @@ def _format_enabled_cloud(cloud_name: str,
                       if i == len(existing_contexts) -
                       1 else ux_utils.INDENT_SYMBOL)
             contexts_formatted.append(f'\n {symbol}{context}')
-        context_info = f'Allowed contexts:{"".join(contexts_formatted)}'
+        context_info = f' Allowed contexts:{"".join(contexts_formatted)}'
     else:
-        context_info = f'Active context: {existing_contexts[0]}'
+        context_info = f' Active context: {existing_contexts[0]}'

     return (f'{_green_color(cloud_and_capabilities)}\n'
             f' {colorama.Style.DIM}{context_info}'
sky/cli.py
CHANGED
@@ -253,7 +253,8 @@ def _async_call_or_wait(request_id: str, async_call: bool,
                 fg='green')
     click.echo(
         f'{ux_utils.INDENT_SYMBOL}{colorama.Style.DIM}Check logs with: '
-        f'sky api logs {short_request_id}
+        f'{ux_utils.BOLD}sky api logs {short_request_id}'
+        f'{colorama.Style.RESET_ALL}\n'
         f'{ux_utils.INDENT_SYMBOL}{colorama.Style.DIM}Or, visit: '
         f'{server_common.get_server_url()}/api/stream?'
         f'request_id={short_request_id}'
@@ -3900,8 +3901,11 @@ def jobs_launch(

     common_utils.check_cluster_name_is_valid(name)

-
-
+    # Optimize info is only show if _need_confirmation.
+    if not yes:
+        click.secho(
+            f'Managed job {dag.name!r} will be launched on (estimated):',
+            fg='yellow')

     request_id = managed_jobs.launch(dag, name, _need_confirmation=not yes)
     job_id_handle = _async_call_or_wait(request_id, async_call,
sky/client/cli.py
CHANGED
@@ -253,7 +253,8 @@ def _async_call_or_wait(request_id: str, async_call: bool,
                 fg='green')
     click.echo(
         f'{ux_utils.INDENT_SYMBOL}{colorama.Style.DIM}Check logs with: '
-        f'sky api logs {short_request_id}
+        f'{ux_utils.BOLD}sky api logs {short_request_id}'
+        f'{colorama.Style.RESET_ALL}\n'
         f'{ux_utils.INDENT_SYMBOL}{colorama.Style.DIM}Or, visit: '
         f'{server_common.get_server_url()}/api/stream?'
         f'request_id={short_request_id}'
@@ -3900,8 +3901,11 @@ def jobs_launch(

     common_utils.check_cluster_name_is_valid(name)

-
-
+    # Optimize info is only show if _need_confirmation.
+    if not yes:
+        click.secho(
+            f'Managed job {dag.name!r} will be launched on (estimated):',
+            fg='yellow')

     request_id = managed_jobs.launch(dag, name, _need_confirmation=not yes)
     job_id_handle = _async_call_or_wait(request_id, async_call,
sky/cloud_stores.py
CHANGED
@@ -23,7 +23,6 @@ from sky.adaptors import nebius
 from sky.adaptors import oci
 from sky.clouds import gcp
 from sky.data import data_utils
-from sky.data.data_utils import Rclone
 from sky.skylet import constants
 from sky.utils import ux_utils

@@ -454,19 +453,18 @@ class IBMCosCloudStorage(CloudStorage):
     def _get_rclone_sync_command(self, source: str, destination: str):
         bucket_name, data_path, bucket_region = data_utils.split_cos_path(
             source)
-
-
-        data_path_in_bucket = bucket_name
-
-
-            bucket_region)
+        rclone_profile_name = (
+            data_utils.Rclone.RcloneStores.IBM.get_profile_name(bucket_name))
+        data_path_in_bucket = f'{bucket_name}{data_path}'
+        rclone_config = data_utils.Rclone.RcloneStores.IBM.get_config(
+            rclone_profile_name=rclone_profile_name, region=bucket_region)
         # configure_rclone stores bucket profile in remote cluster's rclone.conf
         configure_rclone = (
-            f' mkdir -p
-            f' echo "{
+            f' mkdir -p {constants.RCLONE_CONFIG_DIR} &&'
+            f' echo "{rclone_config}">> {constants.RCLONE_CONFIG_PATH}')
         download_via_rclone = (
             'rclone copy '
-            f'{
+            f'{rclone_profile_name}:{data_path_in_bucket} {destination}')

         all_commands = list(self._GET_RCLONE)
         all_commands.append(configure_rclone)
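
Note: the rewritten command construction delegates profile naming and config generation to the new data_utils.Rclone.RcloneStores helpers (defined in the data_utils.py diff below). An illustrative composition, with a hypothetical bucket and region:

    from sky.data import data_utils

    profile = data_utils.Rclone.RcloneStores.IBM.get_profile_name('mybucket')
    # profile == 'sky-ibm-mybucket'
    config = data_utils.Rclone.RcloneStores.IBM.get_config(
        rclone_profile_name=profile, region='us-south')
    # config is an rclone.conf stanza: [sky-ibm-mybucket], type = s3,
    # provider = IBMCOS, endpoint = s3.us-south.cloud-object-storage..., etc.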
sky/data/data_utils.py
CHANGED
@@ -1,7 +1,7 @@
 """Miscellaneous Utils for Sky Data
 """
 import concurrent.futures
-
+import enum
 from multiprocessing import pool
 import os
 import re
@@ -13,6 +13,7 @@ import urllib.parse

 from filelock import FileLock

+from sky import clouds
 from sky import exceptions
 from sky import sky_logging
 from sky.adaptors import aws
@@ -21,6 +22,7 @@ from sky.adaptors import cloudflare
 from sky.adaptors import gcp
 from sky.adaptors import ibm
 from sky.adaptors import nebius
+from sky.skylet import constants
 from sky.skylet import log_lib
 from sky.utils import common_utils
 from sky.utils import ux_utils
@@ -589,73 +591,146 @@ def get_cos_regions() -> List[str]:
     ]


-class Rclone
-    """
-
-
-
-
-
-
-
-
-
-
-
-
-
+class Rclone:
+    """Provides methods to manage and generate Rclone configuration profile."""
+
+    # TODO(syang) Move the enum's functionality into AbstractStore subclass and
+    # deprecate this class.
+    class RcloneStores(enum.Enum):
+        """Rclone supporting storage types and supporting methods."""
+        S3 = 'S3'
+        GCS = 'GCS'
+        IBM = 'IBM'
+        R2 = 'R2'
+        AZURE = 'AZURE'
+
+        def get_profile_name(self, bucket_name: str) -> str:
+            """Gets the Rclone profile name for a given bucket.
+
+            Args:
+                bucket_name: The name of the bucket.
+
+            Returns:
+                A string containing the Rclone profile name, which combines
+                prefix based on the storage type and the bucket name.
+            """
+            profile_prefix = {
+                Rclone.RcloneStores.S3: 'sky-s3',
+                Rclone.RcloneStores.GCS: 'sky-gcs',
+                Rclone.RcloneStores.IBM: 'sky-ibm',
+                Rclone.RcloneStores.R2: 'sky-r2',
+                Rclone.RcloneStores.AZURE: 'sky-azure'
+            }
+            return f'{profile_prefix[self]}-{bucket_name}'
+
+        def get_config(self,
+                       bucket_name: Optional[str] = None,
+                       rclone_profile_name: Optional[str] = None,
+                       region: Optional[str] = None,
+                       storage_account_name: Optional[str] = None,
+                       storage_account_key: Optional[str] = None) -> str:
+            """Generates an Rclone configuration for a specific storage type.
+
+            This method creates an Rclone configuration string based on the
+            storage type and the provided parameters.
+
+            Args:
+                bucket_name: The name of the bucket.
+                rclone_profile_name: The name of the Rclone profile. If not
+                    provided, it will be generated using the bucket_name.
+                region: Region of bucket.
+
+            Returns:
+                A string containing the Rclone configuration.
+
+            Raises:
+                NotImplementedError: If the storage type is not supported.
+            """
+            if rclone_profile_name is None:
+                assert bucket_name is not None
+                rclone_profile_name = self.get_profile_name(bucket_name)
+            if self is Rclone.RcloneStores.S3:
+                aws_credentials = (
+                    aws.session().get_credentials().get_frozen_credentials())
+                access_key_id = aws_credentials.access_key
+                secret_access_key = aws_credentials.secret_key
+                config = textwrap.dedent(f"""\
+                    [{rclone_profile_name}]
+                    type = s3
+                    provider = AWS
+                    access_key_id = {access_key_id}
+                    secret_access_key = {secret_access_key}
+                    acl = private
+                    """)
+            elif self is Rclone.RcloneStores.GCS:
+                config = textwrap.dedent(f"""\
+                    [{rclone_profile_name}]
+                    type = google cloud storage
+                    project_number = {clouds.GCP.get_project_id()}
+                    bucket_policy_only = true
+                    """)
+            elif self is Rclone.RcloneStores.IBM:
+                access_key_id, secret_access_key = ibm.get_hmac_keys()
+                config = textwrap.dedent(f"""\
+                    [{rclone_profile_name}]
+                    type = s3
+                    provider = IBMCOS
+                    access_key_id = {access_key_id}
+                    secret_access_key = {secret_access_key}
+                    region = {region}
+                    endpoint = s3.{region}.cloud-object-storage.appdomain.cloud
+                    location_constraint = {region}-smart
+                    acl = private
+                    """)
+            elif self is Rclone.RcloneStores.R2:
+                cloudflare_session = cloudflare.session()
+                cloudflare_credentials = (
+                    cloudflare.get_r2_credentials(cloudflare_session))
+                endpoint = cloudflare.create_endpoint()
+                access_key_id = cloudflare_credentials.access_key
+                secret_access_key = cloudflare_credentials.secret_key
+                config = textwrap.dedent(f"""\
+                    [{rclone_profile_name}]
+                    type = s3
+                    provider = Cloudflare
+                    access_key_id = {access_key_id}
+                    secret_access_key = {secret_access_key}
+                    endpoint = {endpoint}
+                    region = auto
+                    acl = private
+                    """)
+            elif self is Rclone.RcloneStores.AZURE:
+                assert storage_account_name and storage_account_key
+                config = textwrap.dedent(f"""\
+                    [{rclone_profile_name}]
+                    type = azureblob
+                    account = {storage_account_name}
+                    key = {storage_account_key}
+                    """)
+            else:
+                with ux_utils.print_exception_no_traceback():
+                    raise NotImplementedError(
+                        f'Unsupported store type for Rclone: {self}')
+            return config

     @staticmethod
-    def
-
-    """
+    def store_rclone_config(bucket_name: str, cloud: RcloneStores,
+                            region: str) -> str:
+        """Creates rclone configuration files for bucket syncing and mounting.

         Args:
-            bucket_name
-            cloud
-
-        """
-        try:
-            return cloud.value + bucket_name
-        except AttributeError as e:
-            with ux_utils.print_exception_no_traceback():
-                raise ValueError(f'Value: {cloud} isn\'t a member of '
-                                 'Rclone.RcloneClouds') from e
-
-    @staticmethod
-    def get_rclone_config(bucket_name: str, cloud: RcloneClouds,
-                          region: str) -> str:
-        bucket_rclone_profile = Rclone.generate_rclone_bucket_profile_name(
-            bucket_name, cloud)
-        if cloud is Rclone.RcloneClouds.IBM:
-            access_key_id, secret_access_key = ibm.get_hmac_keys()
-            config_data = textwrap.dedent(f"""\
-                [{bucket_rclone_profile}]
-                type = s3
-                provider = IBMCOS
-                access_key_id = {access_key_id}
-                secret_access_key = {secret_access_key}
-                region = {region}
-                endpoint = s3.{region}.cloud-object-storage.appdomain.cloud
-                location_constraint = {region}-smart
-                acl = private
-                """)
-        else:
-            with ux_utils.print_exception_no_traceback():
-                raise NotImplementedError('No rclone configuration builder was '
-                                          f'implemented for cloud: {cloud}.')
-        return config_data
+            bucket_name: Name of the bucket.
+            cloud: RcloneStores enum representing the cloud provider.
+            region: Region of the bucket.

-
-
-            region: str) -> str:
-        """Creates a configuration files for rclone - used for
-        bucket syncing and mounting """
+        Returns:
+            str: The configuration data written to the file.

-
-
-
-
+        Raises:
+            StorageError: If rclone is not installed.
+        """
+        rclone_config_path = os.path.expanduser(constants.RCLONE_CONFIG_PATH)
+        config_data = cloud.get_config(bucket_name=bucket_name, region=region)
         try:
             subprocess.run('rclone version',
                            shell=True,
@@ -669,9 +744,7 @@ class Rclone():
                 '"curl https://rclone.org/install.sh '
                 '| sudo bash" ') from None

-        # create ~/.config/rclone/ if doesn't exist
         os.makedirs(os.path.dirname(rclone_config_path), exist_ok=True)
-        # create rclone.conf if doesn't exist
         if not os.path.isfile(rclone_config_path):
             open(rclone_config_path, 'w', encoding='utf-8').close()

@@ -692,18 +765,24 @@ class Rclone():
         return config_data

     @staticmethod
-    def get_region_from_rclone(bucket_name: str, cloud:
-        """Returns region field of the specified bucket in rclone.conf
-
-
-            bucket_name
-
-
+    def get_region_from_rclone(bucket_name: str, cloud: RcloneStores) -> str:
+        """Returns the region field of the specified bucket in rclone.conf.
+
+        Args:
+            bucket_name: Name of the bucket.
+            cloud: RcloneStores enum representing the cloud provider.
+
+        Returns:
+            The region field if the bucket exists, otherwise an empty string.
+        """
+        rclone_profile = cloud.get_profile_name(bucket_name)
+        rclone_config_path = os.path.expanduser(constants.RCLONE_CONFIG_PATH)
+        with open(rclone_config_path, 'r', encoding='utf-8') as file:
             bucket_profile_found = False
             for line in file:
                 if line.lstrip().startswith('#'):  # skip user's comments.
                     continue
-                if line.strip() == f'[{
+                if line.strip() == f'[{rclone_profile}]':
                     bucket_profile_found = True
                 elif bucket_profile_found and line.startswith('region'):
                     return line.split('=')[1].strip()
@@ -715,36 +794,45 @@ class Rclone():
         return ''

     @staticmethod
-    def delete_rclone_bucket_profile(bucket_name: str, cloud:
-        """Deletes specified bucket profile
-
-
-
+    def delete_rclone_bucket_profile(bucket_name: str, cloud: RcloneStores):
+        """Deletes specified bucket profile from rclone.conf.
+
+        Args:
+            bucket_name: Name of the bucket.
+            cloud: RcloneStores enum representing the cloud provider.
+        """
+        rclone_profile = cloud.get_profile_name(bucket_name)
+        rclone_config_path = os.path.expanduser(constants.RCLONE_CONFIG_PATH)

         if not os.path.isfile(rclone_config_path):
-            logger.warning(
-
-                f'trying to delete rclone profile: {bucket_rclone_profile}')
+            logger.warning('Failed to locate "rclone.conf" while '
+                           f'trying to delete rclone profile: {rclone_profile}')
             return

         with FileLock(rclone_config_path + '.lock'):
             profiles_to_keep = Rclone._remove_bucket_profile_rclone(
                 bucket_name, cloud)

-            # write back file without profile: [
+            # write back file without profile: [rclone_profile]
             with open(f'{rclone_config_path}', 'w', encoding='utf-8') as file:
                 file.writelines(profiles_to_keep)

     @staticmethod
     def _remove_bucket_profile_rclone(bucket_name: str,
-                                      cloud:
-        """Returns rclone profiles without
-
-
-            bucket_name
-
-
-
+                                      cloud: RcloneStores) -> List[str]:
+        """Returns rclone profiles without ones matching [prefix+bucket_name].
+
+        Args:
+            bucket_name: Name of the bucket.
+            cloud: RcloneStores enum representing the cloud provider.
+
+        Returns:
+            Lines to keep in the rclone config file.
+        """
+        rclone_profile_name = cloud.get_profile_name(bucket_name)
+        rclone_config_path = os.path.expanduser(constants.RCLONE_CONFIG_PATH)
+
+        with open(rclone_config_path, 'r', encoding='utf-8') as file:
             lines = file.readlines()  # returns a list of the file's lines
             # delete existing bucket profile matching:
             # '[profile_prefix+bucket_name]'
@@ -757,7 +845,7 @@ class Rclone():
                     # keep user comments only if they aren't under
                     # a profile we are discarding
                     lines_to_keep.append(line)
-                elif f'[{
+                elif f'[{rclone_profile_name}]' in line:
                     skip_lines = True
                 elif skip_lines:
                     if '[' in line: