skypilot-nightly 1.0.0.dev20251027__py3-none-any.whl → 1.0.0.dev20251101__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as they appear in their public registries. It is provided for informational purposes only and reflects the changes between the two package versions.

Files changed (114)
  1. sky/__init__.py +2 -2
  2. sky/adaptors/aws.py +25 -7
  3. sky/adaptors/coreweave.py +278 -0
  4. sky/backends/backend_utils.py +9 -6
  5. sky/backends/cloud_vm_ray_backend.py +2 -3
  6. sky/check.py +25 -13
  7. sky/client/cli/command.py +52 -24
  8. sky/cloud_stores.py +73 -0
  9. sky/clouds/aws.py +59 -11
  10. sky/core.py +7 -5
  11. sky/dashboard/out/404.html +1 -1
  12. sky/dashboard/out/_next/static/{YP5Vc3ROcDnTGta0XAhcs → 8ixeA0NVQJN8HUdijid8b}/_buildManifest.js +1 -1
  13. sky/dashboard/out/_next/static/chunks/{1141-d5204f35a3388bf4.js → 1141-c3c10e2c6ed71a8f.js} +1 -1
  14. sky/dashboard/out/_next/static/chunks/2755.d6dc6d530fed0b61.js +26 -0
  15. sky/dashboard/out/_next/static/chunks/3294.87a13fba0058865b.js +1 -0
  16. sky/dashboard/out/_next/static/chunks/{3785.538eb23a098fc304.js → 3785.170be320e0060eaf.js} +1 -1
  17. sky/dashboard/out/_next/static/chunks/4282-49b2065b7336e496.js +1 -0
  18. sky/dashboard/out/_next/static/chunks/7615-80aa7b09f45a86d2.js +1 -0
  19. sky/dashboard/out/_next/static/chunks/8969-4ed9236db997b42b.js +1 -0
  20. sky/dashboard/out/_next/static/chunks/9360.10a3aac7aad5e3aa.js +31 -0
  21. sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]/[job]-ac4a217f17b087cb.js +16 -0
  22. sky/dashboard/out/_next/static/chunks/pages/clusters/{[cluster]-fbf2907ce2bb67e2.js → [cluster]-1704039ccaf997cf.js} +1 -1
  23. sky/dashboard/out/_next/static/chunks/pages/{jobs-0dc34cf9a8710a9f.js → jobs-7eee823559e5cf9f.js} +1 -1
  24. sky/dashboard/out/_next/static/chunks/pages/{users-96d6b8bb2dec055f.js → users-2b172f13f8538a7a.js} +1 -1
  25. sky/dashboard/out/_next/static/chunks/pages/workspaces/{[name]-fb1b4d3bfb047cad.js → [name]-bbfe5860c93470fd.js} +1 -1
  26. sky/dashboard/out/_next/static/chunks/pages/{workspaces-6fc994fa1ee6c6bf.js → workspaces-1891376c08050940.js} +1 -1
  27. sky/dashboard/out/_next/static/chunks/{webpack-585d805f693dbceb.js → webpack-e38d5319cd10a3a0.js} +1 -1
  28. sky/dashboard/out/_next/static/css/0748ce22df867032.css +3 -0
  29. sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
  30. sky/dashboard/out/clusters/[cluster].html +1 -1
  31. sky/dashboard/out/clusters.html +1 -1
  32. sky/dashboard/out/config.html +1 -1
  33. sky/dashboard/out/index.html +1 -1
  34. sky/dashboard/out/infra/[context].html +1 -1
  35. sky/dashboard/out/infra.html +1 -1
  36. sky/dashboard/out/jobs/[job].html +1 -1
  37. sky/dashboard/out/jobs/pools/[pool].html +1 -1
  38. sky/dashboard/out/jobs.html +1 -1
  39. sky/dashboard/out/users.html +1 -1
  40. sky/dashboard/out/volumes.html +1 -1
  41. sky/dashboard/out/workspace/new.html +1 -1
  42. sky/dashboard/out/workspaces/[name].html +1 -1
  43. sky/dashboard/out/workspaces.html +1 -1
  44. sky/data/data_utils.py +92 -1
  45. sky/data/mounting_utils.py +71 -2
  46. sky/data/storage.py +166 -9
  47. sky/global_user_state.py +14 -18
  48. sky/jobs/constants.py +2 -0
  49. sky/jobs/controller.py +62 -67
  50. sky/jobs/file_content_utils.py +80 -0
  51. sky/jobs/log_gc.py +201 -0
  52. sky/jobs/scheduler.py +15 -2
  53. sky/jobs/server/core.py +85 -13
  54. sky/jobs/server/server.py +14 -13
  55. sky/jobs/server/utils.py +28 -10
  56. sky/jobs/state.py +216 -40
  57. sky/jobs/utils.py +65 -28
  58. sky/metrics/utils.py +18 -0
  59. sky/optimizer.py +1 -1
  60. sky/provision/kubernetes/instance.py +88 -19
  61. sky/provision/kubernetes/volume.py +2 -2
  62. sky/schemas/api/responses.py +3 -5
  63. sky/schemas/db/spot_jobs/004_job_file_contents.py +42 -0
  64. sky/schemas/db/spot_jobs/005_logs_gc.py +38 -0
  65. sky/schemas/generated/managed_jobsv1_pb2.py +39 -35
  66. sky/schemas/generated/managed_jobsv1_pb2.pyi +21 -5
  67. sky/serve/replica_managers.py +2 -2
  68. sky/serve/serve_utils.py +9 -2
  69. sky/serve/server/server.py +8 -7
  70. sky/server/common.py +21 -15
  71. sky/server/constants.py +1 -1
  72. sky/server/daemons.py +23 -17
  73. sky/server/requests/executor.py +7 -3
  74. sky/server/requests/payloads.py +2 -0
  75. sky/server/requests/request_names.py +80 -0
  76. sky/server/requests/requests.py +137 -102
  77. sky/server/requests/serializers/decoders.py +0 -6
  78. sky/server/requests/serializers/encoders.py +33 -6
  79. sky/server/server.py +105 -36
  80. sky/server/stream_utils.py +56 -13
  81. sky/setup_files/dependencies.py +2 -0
  82. sky/skylet/constants.py +6 -1
  83. sky/skylet/events.py +7 -0
  84. sky/skylet/services.py +18 -7
  85. sky/ssh_node_pools/server.py +5 -4
  86. sky/task.py +14 -42
  87. sky/templates/kubernetes-ray.yml.j2 +1 -1
  88. sky/templates/nebius-ray.yml.j2 +1 -0
  89. sky/templates/websocket_proxy.py +140 -12
  90. sky/users/permission.py +4 -1
  91. sky/utils/cli_utils/status_utils.py +8 -2
  92. sky/utils/context_utils.py +13 -1
  93. sky/utils/db/migration_utils.py +1 -1
  94. sky/utils/resource_checker.py +4 -1
  95. sky/utils/resources_utils.py +53 -29
  96. sky/utils/schemas.py +23 -4
  97. sky/volumes/server/server.py +4 -3
  98. sky/workspaces/server.py +7 -6
  99. {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/METADATA +53 -37
  100. {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/RECORD +106 -100
  101. sky/dashboard/out/_next/static/chunks/2755.227c84f5adf75c6b.js +0 -26
  102. sky/dashboard/out/_next/static/chunks/3015-2dcace420c8939f4.js +0 -1
  103. sky/dashboard/out/_next/static/chunks/3294.6d5054a953a818cb.js +0 -1
  104. sky/dashboard/out/_next/static/chunks/4282-d2f3ef2fbf78e347.js +0 -1
  105. sky/dashboard/out/_next/static/chunks/8969-0389e2cb52412db3.js +0 -1
  106. sky/dashboard/out/_next/static/chunks/9360.07d78b8552bc9d17.js +0 -31
  107. sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]/[job]-c815b90e296b8075.js +0 -16
  108. sky/dashboard/out/_next/static/css/4c052b4444e52a58.css +0 -3
  109. /sky/dashboard/out/_next/static/{YP5Vc3ROcDnTGta0XAhcs → 8ixeA0NVQJN8HUdijid8b}/_ssgManifest.js +0 -0
  110. /sky/dashboard/out/_next/static/chunks/pages/{_app-513d332313670f2a.js → _app-bde01e4a2beec258.js} +0 -0
  111. {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/WHEEL +0 -0
  112. {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/entry_points.txt +0 -0
  113. {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/licenses/LICENSE +0 -0
  114. {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/top_level.txt +0 -0
sky/data/data_utils.py CHANGED

@@ -19,6 +19,7 @@ from sky import sky_logging
 from sky.adaptors import aws
 from sky.adaptors import azure
 from sky.adaptors import cloudflare
+from sky.adaptors import coreweave
 from sky.adaptors import gcp
 from sky.adaptors import ibm
 from sky.adaptors import nebius

@@ -625,6 +626,7 @@ class Rclone:
     R2 = 'R2'
     AZURE = 'AZURE'
     NEBIUS = 'NEBIUS'
+    COREWEAVE = 'COREWEAVE'

     def get_profile_name(self, bucket_name: str) -> str:
         """Gets the Rclone profile name for a given bucket.

@@ -642,7 +644,8 @@ class Rclone:
         Rclone.RcloneStores.IBM: 'sky-ibm',
         Rclone.RcloneStores.R2: 'sky-r2',
         Rclone.RcloneStores.AZURE: 'sky-azure',
-        Rclone.RcloneStores.NEBIUS: 'sky-nebius'
+        Rclone.RcloneStores.NEBIUS: 'sky-nebius',
+        Rclone.RcloneStores.COREWEAVE: 'sky-coreweave'
     }
     return f'{profile_prefix[self]}-{bucket_name}'

@@ -748,6 +751,25 @@ class Rclone:
         endpoint = {endpoint_url}
         acl = private
         """)
+    elif self is Rclone.RcloneStores.COREWEAVE:
+        coreweave_session = coreweave.session()
+        coreweave_credentials = coreweave.get_coreweave_credentials(
+            coreweave_session)
+        # Get endpoint URL from the client
+        endpoint_url = coreweave.get_endpoint()
+        access_key_id = coreweave_credentials.access_key
+        secret_access_key = coreweave_credentials.secret_key
+        config = textwrap.dedent(f"""\
+            [{rclone_profile_name}]
+            type = s3
+            provider = Other
+            access_key_id = {access_key_id}
+            secret_access_key = {secret_access_key}
+            endpoint = {endpoint_url}
+            region = auto
+            acl = private
+            force_path_style = false
+            """)
     else:
         with ux_utils.print_exception_no_traceback():
             raise NotImplementedError(
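
For orientation, a sketch of what the new COREWEAVE branch renders for a hypothetical bucket named my-bucket (the credentials and endpoint are placeholders, resolved at runtime from the CoreWeave adaptor):

from sky.data import data_utils

store = data_utils.Rclone.RcloneStores.COREWEAVE
profile = store.get_profile_name('my-bucket')  # -> 'sky-coreweave-my-bucket'
config = store.get_config(rclone_profile_name=profile)
# config is roughly:
#   [sky-coreweave-my-bucket]
#   type = s3
#   provider = Other
#   access_key_id = <access key>
#   secret_access_key = <secret key>
#   endpoint = <coreweave endpoint>
#   region = auto
#   acl = private
#   force_path_style = false
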
@@ -908,3 +930,72 @@ def split_oci_path(oci_path: str) -> Tuple[str, str]:
     bucket = path_parts.pop(0)
     key = '/'.join(path_parts)
     return bucket, key
+
+
+def create_coreweave_client() -> Client:
+    """Create CoreWeave S3 client."""
+    return coreweave.client('s3')
+
+
+def split_coreweave_path(coreweave_path: str) -> Tuple[str, str]:
+    """Splits CoreWeave Path into Bucket name and Relative Path to Bucket
+
+    Args:
+        coreweave_path: str; CoreWeave Path, e.g. cw://imagenet/train/
+    """
+    path_parts = coreweave_path.replace('cw://', '').split('/')
+    bucket = path_parts.pop(0)
+    key = '/'.join(path_parts)
+    return bucket, key
+
+
+def verify_coreweave_bucket(name: str, retry: int = 0) -> bool:
+    """Verify CoreWeave bucket exists and is accessible.
+
+    Retries head_bucket operation up to retry times with 5 second intervals
+    to handle DNS propagation delays or temporary connectivity issues.
+    """
+    coreweave_client = create_coreweave_client()
+    max_retries = retry + 1  # 5s * (retry+1) = total seconds to retry
+    retry_count = 0
+
+    while retry_count < max_retries:
+        try:
+            coreweave_client.head_bucket(Bucket=name)
+            if retry_count > 0:
+                logger.debug(
+                    f'Successfully verified bucket {name} after '
+                    f'{retry_count} retries ({retry_count * 5} seconds)')
+            return True
+
+        except coreweave.botocore.exceptions.ClientError as e:  # type: ignore[union-attr] # pylint: disable=line-too-long
+            error_code = e.response['Error']['Code']
+            if error_code == '403':
+                logger.error(f'Access denied to bucket {name}')
+                return False
+            elif error_code == '404':
+                logger.debug(f'Bucket {name} does not exist')
+            else:
+                logger.debug(
+                    f'Unexpected error checking CoreWeave bucket {name}: {e}')
+        except Exception as e:  # pylint: disable=broad-except
+            logger.debug(
+                f'Unexpected error checking CoreWeave bucket {name}: {e}')
+
+        # Common retry logic for all transient errors
+        retry_count += 1
+        if retry_count < max_retries:
+            logger.debug(f'Error checking CoreWeave bucket {name} '
+                         f'(attempt {retry_count}/{max_retries}). '
+                         f'Retrying in 5 seconds...')
+            time.sleep(5)
+        else:
+            attempt_str = 'attempt'
+            if max_retries > 1:
+                attempt_str += 's'
+            logger.error(f'Failed to verify CoreWeave bucket {name} after '
+                         f'{max_retries} {attempt_str}.')
+            return False
+
+    # Should not reach here, but just in case
+    return False
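
The path helper mirrors its S3/OCI siblings; its behavior on the docstring's own example path:

from sky.data import data_utils

bucket, key = data_utils.split_coreweave_path('cw://imagenet/train/')
assert (bucket, key) == ('imagenet', 'train/')
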
sky/data/mounting_utils.py CHANGED

@@ -148,6 +148,45 @@ def get_nebius_mount_cmd(nebius_profile_name: str,
     return mount_cmd


+def get_coreweave_mount_cmd(cw_credentials_path: str,
+                            coreweave_profile_name: str,
+                            bucket_name: str,
+                            endpoint_url: str,
+                            mount_path: str,
+                            _bucket_sub_path: Optional[str] = None) -> str:
+    """Returns a command to mount CoreWeave bucket"""
+    if _bucket_sub_path is None:
+        _bucket_sub_path = ''
+    else:
+        _bucket_sub_path = f':{_bucket_sub_path}'
+
+    # Use rclone for ARM64 architectures since goofys doesn't support them
+    arch_check = 'ARCH=$(uname -m) && '
+    rclone_mount = (
+        f'{FUSERMOUNT3_SOFT_LINK_CMD} && '
+        f'AWS_SHARED_CREDENTIALS_FILE={cw_credentials_path} '
+        f'AWS_PROFILE={coreweave_profile_name} '
+        f'rclone mount :s3:{bucket_name}{_bucket_sub_path} {mount_path} '
+        f'--s3-force-path-style=false '
+        f'--s3-endpoint {endpoint_url} --daemon --allow-other')
+    goofys_mount = (f'AWS_SHARED_CREDENTIALS_FILE={cw_credentials_path} '
+                    f'AWS_PROFILE={coreweave_profile_name} {_GOOFYS_WRAPPER} '
+                    '-o allow_other '
+                    f'--stat-cache-ttl {_STAT_CACHE_TTL} '
+                    f'--type-cache-ttl {_TYPE_CACHE_TTL} '
+                    f'--subdomain '
+                    f'--endpoint {endpoint_url} '
+                    f'{bucket_name}{_bucket_sub_path} {mount_path}')
+
+    mount_cmd = (f'{arch_check}'
+                 f'if [ "$ARCH" = "aarch64" ] || [ "$ARCH" = "arm64" ]; then '
+                 f'  {rclone_mount}; '
+                 f'else '
+                 f'  {goofys_mount}; '
+                 f'fi')
+    return mount_cmd
+
+
 def get_gcs_mount_install_cmd() -> str:
     """Returns a command to install GCS mount utility gcsfuse."""
     install_cmd = ('ARCH=$(uname -m) && '
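
The new helper defers the FUSE-client choice to runtime: a single shell string probes uname -m and picks rclone on ARM64 (goofys does not support it) and goofys elsewhere. A sketch with placeholder arguments:

from sky.data import mounting_utils

cmd = mounting_utils.get_coreweave_mount_cmd(
    cw_credentials_path='~/.coreweave/credentials',  # placeholder path
    coreweave_profile_name='coreweave',              # placeholder profile
    bucket_name='my-bucket',
    endpoint_url='https://<endpoint>',               # placeholder endpoint
    mount_path='/mnt/data')
# cmd expands to roughly:
#   ARCH=$(uname -m) && if [ "$ARCH" = "aarch64" ] || [ "$ARCH" = "arm64" ];
#   then <rclone mount :s3:my-bucket /mnt/data ...>;
#   else <goofys ... my-bucket /mnt/data>; fi
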
@@ -194,9 +233,18 @@ def get_az_mount_install_cmd() -> str:
         # Try to install fuse3 from default repos
         'sudo apt-get update && '
         'FUSE3_INSTALLED=0 && '
+        # On Kubernetes, if FUSERMOUNT_SHARED_DIR is set, it means
+        # fusermount and fusermount3 is symlinked to fusermount-shim.
+        # If we reinstall fuse3, it may overwrite the symlink, so
+        # just install libfuse3, which is needed by blobfuse2.
+        'if [ -n "${FUSERMOUNT_SHARED_DIR:-}" ]; then '
+        '  PACKAGES="libfuse3-3 libfuse3-dev"; '
+        'else '
+        '  PACKAGES="fuse3 libfuse3-3 libfuse3-dev"; '
+        'fi && '
         'if sudo apt-get install -y '
         '-o Dpkg::Options::="--force-confdef" '
-        'fuse3 libfuse3-dev; then '
+        '$PACKAGES; then '
         ' FUSE3_INSTALLED=1; '
         ' echo "fuse3 installed from default repos"; '
         'else '

@@ -217,7 +265,7 @@ def get_az_mount_install_cmd() -> str:
         ' if sudo apt-get install -y '
         '-o Dpkg::Options::="--force-confdef" '
         '-o Dpkg::Options::="--force-confold" '
-        'fuse3 libfuse3-3 libfuse3-dev; then '
+        '$PACKAGES; then '
         ' FUSE3_INSTALLED=1; '
         ' echo "fuse3 installed from focal"; '
         ' sudo rm /etc/apt/sources.list.d/focal-fuse3.list; '
@@ -564,7 +612,28 @@ def get_mounting_script(
           fi
         fi
         echo "Mounting $SOURCE_BUCKET to $MOUNT_PATH with $MOUNT_BINARY..."
+        set +e
         {mount_cmd}
+        MOUNT_EXIT_CODE=$?
+        set -e
+        if [ $MOUNT_EXIT_CODE -ne 0 ]; then
+          echo "Mount failed with exit code $MOUNT_EXIT_CODE."
+          if [ "$MOUNT_BINARY" = "goofys" ]; then
+            echo "Looking for goofys log files..."
+            # Find goofys log files in /tmp (created by mktemp -t goofys.XXXX.log)
+            # Note: if /dev/log exists, goofys logs to syslog instead of a file
+            GOOFYS_LOGS=$(ls -t /tmp/goofys.*.log 2>/dev/null | head -1)
+            if [ -n "$GOOFYS_LOGS" ]; then
+              echo "=== Goofys log file contents ==="
+              cat "$GOOFYS_LOGS"
+              echo "=== End of goofys log file ==="
+            else
+              echo "No goofys log file found in /tmp"
+            fi
+          fi
+          # TODO(kevin): Print logs from rclone, etc too for observability.
+          exit $MOUNT_EXIT_CODE
+        fi
         echo "Mounting done."
         """)
sky/data/storage.py CHANGED

@@ -23,6 +23,7 @@ from sky import skypilot_config
 from sky.adaptors import aws
 from sky.adaptors import azure
 from sky.adaptors import cloudflare
+from sky.adaptors import coreweave
 from sky.adaptors import gcp
 from sky.adaptors import ibm
 from sky.adaptors import nebius

@@ -62,6 +63,7 @@ STORE_ENABLED_CLOUDS: List[str] = [
     str(clouds.OCI()),
     str(clouds.Nebius()),
     cloudflare.NAME,
+    coreweave.NAME,
 ]

 # Maximum number of concurrent rsync upload processes

@@ -93,6 +95,12 @@ def get_cached_enabled_storage_cloud_names_or_refresh(
     r2_is_enabled, _ = cloudflare.check_storage_credentials()
     if r2_is_enabled:
         enabled_clouds.append(cloudflare.NAME)
+
+    # Similarly, handle CoreWeave storage credentials
+    coreweave_is_enabled, _ = coreweave.check_storage_credentials()
+    if coreweave_is_enabled:
+        enabled_clouds.append(coreweave.NAME)
+
     if raise_if_no_cloud_access and not enabled_clouds:
         raise exceptions.NoCloudAccessError(
             'No cloud access available for storage. '

@@ -126,6 +134,7 @@ class StoreType(enum.Enum):
     IBM = 'IBM'
     OCI = 'OCI'
     NEBIUS = 'NEBIUS'
+    COREWEAVE = 'COREWEAVE'
     VOLUME = 'VOLUME'

     @classmethod
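
With the adaptor wired into the credential check above, CoreWeave now appears alongside the other storage clouds. A sketch, assuming CoreWeave credentials are configured:

from sky.data import storage

names = storage.get_cached_enabled_storage_cloud_names_or_refresh(
    raise_if_no_cloud_access=False)
# e.g. [..., 'Cloudflare', 'CoreWeave'] when both credential sets are present.
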
@@ -883,7 +892,7 @@ class Storage(object):
                 f'{source} in the file_mounts section of your YAML')
         is_local_source = True
     elif split_path.scheme in [
-            's3', 'gs', 'https', 'r2', 'cos', 'oci', 'nebius'
+            's3', 'gs', 'https', 'r2', 'cos', 'oci', 'nebius', 'cw'
     ]:
         is_local_source = False
     # Storage mounting does not support mounting specific files from

@@ -908,7 +917,8 @@ class Storage(object):
         with ux_utils.print_exception_no_traceback():
             raise exceptions.StorageSourceError(
                 f'Supported paths: local, s3://, gs://, https://, '
-                f'r2://, cos://, oci://, nebius://. Got: {source}')
+                f'r2://, cos://, oci://, nebius://, cw://. '
+                f'Got: {source}')
     return source, is_local_source

 def _validate_storage_spec(self, name: Optional[str]) -> None:

@@ -923,7 +933,16 @@ class Storage(object):
     """
     prefix = name.split('://')[0]
     prefix = prefix.lower()
-    if prefix in ['s3', 'gs', 'https', 'r2', 'cos', 'oci', 'nebius']:
+    if prefix in [
+            's3',
+            'gs',
+            'https',
+            'r2',
+            'cos',
+            'oci',
+            'nebius',
+            'cw',
+    ]:
         with ux_utils.print_exception_no_traceback():
             raise exceptions.StorageNameError(
                 'Prefix detected: `name` cannot start with '
@@ -1062,6 +1081,12 @@ class Storage(object):
             source=self.source,
             sync_on_reconstruction=self.sync_on_reconstruction,
             _bucket_sub_path=self._bucket_sub_path)
+    elif s_type == StoreType.COREWEAVE:
+        store = CoreWeaveStore.from_metadata(
+            s_metadata,
+            source=self.source,
+            sync_on_reconstruction=self.sync_on_reconstruction,
+            _bucket_sub_path=self._bucket_sub_path)
     else:
         with ux_utils.print_exception_no_traceback():
             raise ValueError(f'Unknown store type: {s_type}')

@@ -1417,6 +1442,7 @@ class S3CompatibleConfig:
     aws_profile: Optional[str] = None
     get_endpoint_url: Optional[Callable[[], str]] = None
     credentials_file: Optional[str] = None
+    config_file: Optional[str] = None
     extra_cli_args: Optional[List[str]] = None

     # Provider-specific settings
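
The new config_file field lets a provider point the AWS CLI at a dedicated config file (exported as AWS_CONFIG_FILE) in addition to its credentials file. A minimal sketch of the configuration-driven pattern using a hypothetical provider, with the field set abridged; see CoreWeaveStore below for a complete real example:

@register_s3_compatible_store
class ExampleStore(S3CompatibleStore):  # hypothetical, for illustration only

    @classmethod
    def get_config(cls) -> S3CompatibleConfig:
        return S3CompatibleConfig(
            store_type='EXAMPLE',
            url_prefix='example://',
            credentials_file='~/.example/credentials',  # hypothetical path
            config_file='~/.example/config',  # the field added in this release
        )
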
@@ -1437,8 +1463,8 @@ class S3CompatibleStore(AbstractStore):
     """Base class for S3-compatible object storage providers.

     This class provides a unified interface for all S3-compatible storage
-    providers (AWS S3, Cloudflare R2, Nebius, MinIO, etc.) by leveraging
-    a configuration-driven approach that eliminates code duplication.
+    providers (AWS S3, Cloudflare R2, Nebius, MinIO, CoreWeave, etc.) by
+    leveraging a configuration-driven approach that eliminates code duplication

     ## Adding a New S3-Compatible Store
@@ -1864,6 +1890,9 @@ class S3CompatibleStore(AbstractStore):
     if self.config.credentials_file:
         cmd = 'AWS_SHARED_CREDENTIALS_FILE=' + \
               f'{self.config.credentials_file} {cmd}'
+    if self.config.config_file:
+        cmd = 'AWS_CONFIG_FILE=' + \
+              f'{self.config.config_file} {cmd}'

     return cmd

@@ -1909,6 +1938,9 @@ class S3CompatibleStore(AbstractStore):
     if self.config.credentials_file:
         cmd = 'AWS_SHARED_CREDENTIALS_FILE=' + \
               f'{self.config.credentials_file} {cmd}'
+    if self.config.config_file:
+        cmd = 'AWS_CONFIG_FILE=' + \
+              f'{self.config.config_file} {cmd}'

     return cmd

@@ -1962,6 +1994,9 @@ class S3CompatibleStore(AbstractStore):
     if self.config.credentials_file:
         command = (f'AWS_SHARED_CREDENTIALS_FILE='
                    f'{self.config.credentials_file} {command}')
+    if self.config.config_file:
+        command = 'AWS_CONFIG_FILE=' + \
+                  f'{self.config.config_file} {command}'
     with ux_utils.print_exception_no_traceback():
         raise exceptions.StorageBucketGetError(
             _BUCKET_FAIL_TO_CONNECT_MESSAGE.format(name=self.name) +

@@ -2034,7 +2069,9 @@ class S3CompatibleStore(AbstractStore):
         remove_command = (f'AWS_SHARED_CREDENTIALS_FILE='
                           f'{self.config.credentials_file} '
                           f'{remove_command}')
-
+    if self.config.config_file:
+        remove_command = 'AWS_CONFIG_FILE=' + \
+                         f'{self.config.config_file} {remove_command}'
     return self._execute_remove_command(
         remove_command, bucket_name,
         f'Deleting {self.config.store_type} bucket {bucket_name}',

@@ -2047,8 +2084,9 @@ class S3CompatibleStore(AbstractStore):
     try:
         with rich_utils.safe_status(
                 ux_utils.spinner_message(hint_operating)):
-            subprocess.check_output(command.split(' '),
-                                    stderr=subprocess.STDOUT)
+            subprocess.check_output(command,
+                                    stderr=subprocess.STDOUT,
+                                    shell=True)
     except subprocess.CalledProcessError as e:
         if 'NoSuchBucket' in e.output.decode('utf-8'):
             logger.debug(

@@ -2091,7 +2129,9 @@ class S3CompatibleStore(AbstractStore):
         remove_command = (f'AWS_SHARED_CREDENTIALS_FILE='
                          f'{self.config.credentials_file} '
                          f'{remove_command}')
-
+    if self.config.config_file:
+        remove_command = 'AWS_CONFIG_FILE=' + \
+                         f'{self.config.config_file} {remove_command}'
     return self._execute_remove_command(
         remove_command, bucket_name,
         (f'Removing objects from {self.config.store_type} bucket '
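
The switch from command.split(' ') to shell=True goes hand in hand with the env-prefixing above: the commands may now start with VAR=value assignments (AWS_CONFIG_FILE, AWS_SHARED_CREDENTIALS_FILE), which are shell syntax rather than argv tokens. A sketch with a placeholder command:

import subprocess

cmd = 'AWS_CONFIG_FILE=/tmp/config aws --version'  # placeholder command
# Runs only through a shell; splitting on spaces would treat the leading
# assignment as the executable name and fail.
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
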
@@ -2168,6 +2208,10 @@ class GcsStore(AbstractStore):
     elif self.source.startswith('oci://'):
         raise NotImplementedError(
             'Moving data from OCI to GCS is currently not supported.')
+    elif self.source.startswith('cw://'):
+        raise NotImplementedError(
+            'Moving data from CoreWeave Object Storage to GCS is'
+            ' currently not supported.')
     # Validate name
     self.name = self.validate_name(self.name)
     # Check if the storage is enabled

@@ -2783,6 +2827,10 @@ class AzureBlobStore(AbstractStore):
     elif self.source.startswith('oci://'):
         raise NotImplementedError(
             'Moving data from OCI to AZureBlob is not supported.')
+    elif self.source.startswith('cw://'):
+        raise NotImplementedError(
+            'Moving data from CoreWeave Object Storage to AzureBlob is'
+            ' currently not supported.')
     # Validate name
     self.name = self.validate_name(self.name)

@@ -3154,6 +3202,8 @@ class AzureBlobStore(AbstractStore):
         raise NotImplementedError(error_message.format('OCI'))
     elif self.source.startswith('nebius://'):
         raise NotImplementedError(error_message.format('NEBIUS'))
+    elif self.source.startswith('cw://'):
+        raise NotImplementedError(error_message.format('CoreWeave'))
     else:
         self.batch_az_blob_sync([self.source])
 except exceptions.StorageUploadError:

@@ -3572,6 +3622,10 @@ class IBMCosStore(AbstractStore):
         assert self.name == data_utils.split_cos_path(self.source)[0], (
             'COS Bucket is specified as path, the name should be '
             'the same as COS bucket.')
+    elif self.source.startswith('cw://'):
+        raise NotImplementedError(
+            'Moving data from CoreWeave Object Storage to COS is '
+            'currently not supported.')
     # Validate name
     self.name = IBMCosStore.validate_name(self.name)

@@ -3670,6 +3724,9 @@ class IBMCosStore(AbstractStore):
     elif self.source.startswith('r2://'):
         raise Exception('IBM COS currently not supporting'
                         'data transfers between COS and r2')
+    elif self.source.startswith('cw://'):
+        raise Exception('IBM COS currently not supporting'
+                        'data transfers between COS and CoreWeave')
     else:
         self.batch_ibm_rsync([self.source])
@@ -4595,3 +4652,103 @@ class NebiusStore(S3CompatibleStore):
         rclone_config, rclone_profile_name, self.bucket.name, mount_path)
     return mounting_utils.get_mounting_command(mount_path, install_cmd,
                                                mount_cached_cmd)
+
+
+@register_s3_compatible_store
+class CoreWeaveStore(S3CompatibleStore):
+    """CoreWeaveStore inherits from S3CompatibleStore and represents the
+    backend for CoreWeave Object Storage buckets.
+    """
+
+    @classmethod
+    def get_config(cls) -> S3CompatibleConfig:
+        """Return the configuration for CoreWeave Object Storage."""
+        return S3CompatibleConfig(
+            store_type='COREWEAVE',
+            url_prefix='cw://',
+            client_factory=lambda region: data_utils.create_coreweave_client(),
+            resource_factory=lambda name: coreweave.resource('s3').Bucket(name),
+            split_path=data_utils.split_coreweave_path,
+            verify_bucket=data_utils.verify_coreweave_bucket,
+            aws_profile=coreweave.COREWEAVE_PROFILE_NAME,
+            get_endpoint_url=coreweave.get_endpoint,
+            credentials_file=coreweave.COREWEAVE_CREDENTIALS_PATH,
+            config_file=coreweave.COREWEAVE_CONFIG_PATH,
+            cloud_name=coreweave.NAME,
+            default_region=coreweave.DEFAULT_REGION,
+            mount_cmd_factory=cls._get_coreweave_mount_cmd,
+        )
+
+    def _get_bucket(self) -> Tuple[StorageHandle, bool]:
+        """Get or create bucket using CoreWeave's S3 API"""
+        bucket = self.config.resource_factory(self.name)
+
+        # Use our custom bucket verification instead of head_bucket
+        if data_utils.verify_coreweave_bucket(self.name):
+            self._validate_existing_bucket()
+            return bucket, False
+
+        # TODO(hailong): Enable the bucket creation for CoreWeave
+        # Disable this to avoid waiting too long until the following
+        # issue is resolved:
+        # https://github.com/skypilot-org/skypilot/issues/7736
+        raise exceptions.StorageBucketGetError(
+            f'Bucket {self.name!r} does not exist. CoreWeave buckets can take'
+            ' a long time to become accessible after creation, so SkyPilot'
+            ' does not create them automatically. Please create the bucket'
+            ' manually in CoreWeave and wait for it to be accessible before'
+            ' using it.')
+
+        # # Check if this is a source with URL prefix (existing bucket case)
+        # if isinstance(self.source, str) and self.source.startswith(
+        #         self.config.url_prefix):
+        #     with ux_utils.print_exception_no_traceback():
+        #         raise exceptions.StorageBucketGetError(
+        #             'Attempted to use a non-existent bucket as a source: '
+        #             f'{self.source}.')
+
+        # # If bucket cannot be found, create it if needed
+        # if self.sync_on_reconstruction:
+        #     bucket = self._create_bucket(self.name)
+        #     return bucket, True
+        # else:
+        #     raise exceptions.StorageExternalDeletionError(
+        #         'Attempted to fetch a non-existent bucket: '
+        #         f'{self.name}')
+
+    @classmethod
+    def _get_coreweave_mount_cmd(cls, bucket_name: str, mount_path: str,
+                                 bucket_sub_path: Optional[str]) -> str:
+        """Factory method for CoreWeave mount command."""
+        endpoint_url = coreweave.get_endpoint()
+        return mounting_utils.get_coreweave_mount_cmd(
+            coreweave.COREWEAVE_CREDENTIALS_PATH,
+            coreweave.COREWEAVE_PROFILE_NAME, bucket_name, endpoint_url,
+            mount_path, bucket_sub_path)
+
+    def mount_cached_command(self, mount_path: str) -> str:
+        """CoreWeave-specific cached mount implementation using rclone."""
+        install_cmd = mounting_utils.get_rclone_install_cmd()
+        rclone_profile_name = (
+            data_utils.Rclone.RcloneStores.COREWEAVE.get_profile_name(
+                self.name))
+        rclone_config = data_utils.Rclone.RcloneStores.COREWEAVE.get_config(
+            rclone_profile_name=rclone_profile_name)
+        mount_cached_cmd = mounting_utils.get_mount_cached_cmd(
+            rclone_config, rclone_profile_name, self.bucket.name, mount_path)
+        return mounting_utils.get_mounting_command(mount_path, install_cmd,
+                                                   mount_cached_cmd)
+
+    def _create_bucket(self, bucket_name: str) -> StorageHandle:
+        """Create bucket using S3 API with timing handling for CoreWeave."""
+        result = super()._create_bucket(bucket_name)
+        # Ensure bucket is created
+        # The newly created bucket ever takes about 18min to be accessible,
+        # here we just retry for 36 times (5s * 36 = 180s) to avoid waiting
+        # too long
+        # TODO(hailong): Update the logic here when the following
+        # issue is resolved:
+        # https://github.com/skypilot-org/skypilot/issues/7736
+        data_utils.verify_coreweave_bucket(bucket_name, retry=36)
+
+        return result
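
Putting the pieces together, a usage sketch (the bucket name is hypothetical, and the bucket must already exist, since _get_bucket above refuses to auto-create CoreWeave buckets):

import sky

task = sky.Task(run='ls /data')
task.set_storage_mounts({
    '/data': sky.Storage(source='cw://my-existing-bucket'),
})
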
sky/global_user_state.py CHANGED

@@ -1605,7 +1605,6 @@ def get_cluster_from_name(
     cluster_table.c.owner,
     cluster_table.c.metadata,
     cluster_table.c.cluster_hash,
-    cluster_table.c.storage_mounts_metadata,
     cluster_table.c.cluster_ever_up,
     cluster_table.c.status_updated_at,
     cluster_table.c.user_hash,

@@ -1642,8 +1641,6 @@ def get_cluster_from_name(
     'owner': _load_owner(row.owner),
     'metadata': json.loads(row.metadata),
     'cluster_hash': row.cluster_hash,
-    'storage_mounts_metadata': _load_storage_mounts_metadata(
-        row.storage_mounts_metadata),
     'cluster_ever_up': bool(row.cluster_ever_up),
     'status_updated_at': row.status_updated_at,
     'workspace': row.workspace,
@@ -1704,27 +1701,27 @@ def get_clusters(
     cluster_table.c.name,
     cluster_table.c.launched_at,
     cluster_table.c.handle,
-    cluster_table.c.last_use,
     cluster_table.c.status,
     cluster_table.c.autostop,
     cluster_table.c.to_down,
-    cluster_table.c.owner,
-    cluster_table.c.metadata,
     cluster_table.c.cluster_hash,
-    cluster_table.c.storage_mounts_metadata,
     cluster_table.c.cluster_ever_up,
-    cluster_table.c.status_updated_at,
     cluster_table.c.user_hash,
-    cluster_table.c.config_hash,
     cluster_table.c.workspace,
-    cluster_table.c.is_managed,
     user_table.c.name.label('user_name'),
 ]
 if not summary_response:
     query_fields.extend([
         cluster_table.c.last_creation_yaml,
         cluster_table.c.last_creation_command,
+        cluster_table.c.config_hash,
+        cluster_table.c.owner,
+        cluster_table.c.metadata,
+        cluster_table.c.last_use,
+        cluster_table.c.status_updated_at,
     ])
+if not exclude_managed_clusters:
+    query_fields.append(cluster_table.c.is_managed)
 with orm.Session(_SQLALCHEMY_ENGINE) as session:
     query = session.query(*query_fields).outerjoin(
         user_table, cluster_table.c.user_hash == user_table.c.id)

@@ -1771,30 +1768,29 @@ def get_clusters(
     'name': row.name,
     'launched_at': row.launched_at,
     'handle': pickle.loads(row.handle),
-    'last_use': row.last_use,
     'status': status_lib.ClusterStatus[row.status],
     'autostop': row.autostop,
     'to_down': bool(row.to_down),
-    'owner': _load_owner(row.owner),
-    'metadata': json.loads(row.metadata),
     'cluster_hash': row.cluster_hash,
-    'storage_mounts_metadata': _load_storage_mounts_metadata(
-        row.storage_mounts_metadata),
     'cluster_ever_up': bool(row.cluster_ever_up),
-    'status_updated_at': row.status_updated_at,
     'user_hash': (row.user_hash
                   if row.user_hash is not None else current_user_hash),
     'user_name': (row.user_name
                   if row.user_name is not None else current_user_name),
     'workspace': row.workspace,
-    'is_managed': bool(row.is_managed),
-    'config_hash': row.config_hash,
+    'is_managed': False
+                  if exclude_managed_clusters else bool(row.is_managed),
 }
 if not summary_response:
     record['last_creation_yaml'] = row.last_creation_yaml
     record['last_creation_command'] = row.last_creation_command
     record['last_event'] = last_cluster_event_dict.get(
         row.cluster_hash, None)
+    record['config_hash'] = row.config_hash
+    record['owner'] = _load_owner(row.owner)
+    record['metadata'] = json.loads(row.metadata)
+    record['last_use'] = row.last_use
+    record['status_updated_at'] = row.status_updated_at

 records.append(record)
 return records
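
The get_clusters() refactor trims the summary query to frequently used columns, defers the rest to full responses, and only fetches is_managed when managed clusters are not excluded. The resulting record shapes, abridged from the diff above:

# Always present in a get_clusters() record:
summary_record_keys = {
    'name', 'launched_at', 'handle', 'status', 'autostop', 'to_down',
    'cluster_hash', 'cluster_ever_up', 'user_hash', 'user_name',
    'workspace', 'is_managed',
}
# Added only when summary_response is False:
full_record_extra_keys = {
    'last_creation_yaml', 'last_creation_command', 'last_event',
    'config_hash', 'owner', 'metadata', 'last_use', 'status_updated_at',
}
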
sky/jobs/constants.py CHANGED

@@ -46,6 +46,8 @@ JOBS_CLUSTER_NAME_PREFIX_LENGTH = 25
 # The version of the lib files that jobs/utils use. Whenever there is an API
 # change for the jobs/utils, we need to bump this version and update
 # job.utils.ManagedJobCodeGen to handle the version update.
+# WARNING: If you update this due to a codegen change, make sure to make the
+# corresponding change in the ManagedJobsService AND bump the SKYLET_VERSION.
 MANAGED_JOBS_VERSION = 12

 # The command for setting up the jobs dashboard on the controller. It firstly