skypilot-nightly 1.0.0.dev20250527__py3-none-any.whl → 1.0.0.dev20250528__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. sky/__init__.py +2 -2
  2. sky/adaptors/kubernetes.py +13 -1
  3. sky/backends/cloud_vm_ray_backend.py +2 -2
  4. sky/check.py +4 -1
  5. sky/clouds/kubernetes.py +2 -2
  6. sky/dashboard/out/404.html +1 -1
  7. sky/dashboard/out/_next/static/Mx1iAbDQn1jMHh3UHmK3R/_buildManifest.js +1 -0
  8. sky/dashboard/out/_next/static/chunks/121-8f55ee3fa6301784.js +20 -0
  9. sky/dashboard/out/_next/static/chunks/{573-82bd40a37af834f1.js → 173-7db8607cefc20f70.js} +5 -5
  10. sky/dashboard/out/_next/static/chunks/236-d6900c828331f664.js +6 -0
  11. sky/dashboard/out/_next/static/chunks/293-351268365226d251.js +1 -0
  12. sky/dashboard/out/_next/static/chunks/470-4d003c441839094d.js +1 -0
  13. sky/dashboard/out/_next/static/chunks/578-9146658cead92981.js +6 -0
  14. sky/dashboard/out/_next/static/chunks/843-256ec920f6d5f41f.js +11 -0
  15. sky/dashboard/out/_next/static/chunks/856-62b87c68917b08ed.js +1 -0
  16. sky/dashboard/out/_next/static/chunks/973-1a09cac61cfcc1e1.js +1 -0
  17. sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]/[job]-159bffb2fa34ed54.js +6 -0
  18. sky/dashboard/out/_next/static/chunks/pages/clusters/{[cluster]-e23fcddf60578a0d.js → [cluster]-9506c00257d10dbd.js} +1 -1
  19. sky/dashboard/out/_next/static/chunks/pages/{clusters-8afda8efa5b74997.js → clusters-943992b84fd6f4ee.js} +1 -1
  20. sky/dashboard/out/_next/static/chunks/pages/config-41738d1896fc02fe.js +6 -0
  21. sky/dashboard/out/_next/static/chunks/pages/infra-881fcd902fbbd0e5.js +6 -0
  22. sky/dashboard/out/_next/static/chunks/pages/jobs/[job]-2c29e97a6aa50dd4.js +6 -0
  23. sky/dashboard/out/_next/static/chunks/pages/{jobs-ff7e8e377d02b651.js → jobs-a4efc09e61988f8d.js} +1 -1
  24. sky/dashboard/out/_next/static/chunks/pages/users-b2634885d67c49a6.js +6 -0
  25. sky/dashboard/out/_next/static/chunks/pages/workspace/{new-63763ffa3edb4508.js → new-579b3203c7c19d84.js} +1 -1
  26. sky/dashboard/out/_next/static/chunks/pages/workspaces/{[name]-3ede7a13caf23375.js → [name]-9388e38fac73ee8f.js} +1 -1
  27. sky/dashboard/out/_next/static/chunks/pages/workspaces-610c49ae3619ee85.js +1 -0
  28. sky/dashboard/out/_next/static/css/ffd1cd601648c303.css +3 -0
  29. sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
  30. sky/dashboard/out/clusters/[cluster].html +1 -1
  31. sky/dashboard/out/clusters.html +1 -1
  32. sky/dashboard/out/config.html +1 -1
  33. sky/dashboard/out/index.html +1 -1
  34. sky/dashboard/out/infra.html +1 -1
  35. sky/dashboard/out/jobs/[job].html +1 -1
  36. sky/dashboard/out/jobs.html +1 -1
  37. sky/dashboard/out/users.html +1 -1
  38. sky/dashboard/out/workspace/new.html +1 -1
  39. sky/dashboard/out/workspaces/[name].html +1 -1
  40. sky/dashboard/out/workspaces.html +1 -1
  41. sky/global_user_state.py +181 -134
  42. sky/provision/kubernetes/utils.py +4 -4
  43. sky/server/server.py +6 -5
  44. sky/setup_files/dependencies.py +1 -0
  45. sky/skylet/constants.py +4 -0
  46. sky/utils/db_utils.py +34 -46
  47. sky/utils/subprocess_utils.py +2 -3
  48. {skypilot_nightly-1.0.0.dev20250527.dist-info → skypilot_nightly-1.0.0.dev20250528.dist-info}/METADATA +2 -1
  49. {skypilot_nightly-1.0.0.dev20250527.dist-info → skypilot_nightly-1.0.0.dev20250528.dist-info}/RECORD +55 -54
  50. sky/dashboard/out/_next/static/D5bjIfl4Ob3SV3LJz3CO0/_buildManifest.js +0 -1
  51. sky/dashboard/out/_next/static/chunks/236-e220ba0c35bf089e.js +0 -6
  52. sky/dashboard/out/_next/static/chunks/470-1d784f5c8750744a.js +0 -1
  53. sky/dashboard/out/_next/static/chunks/488-50d843fdb5396d32.js +0 -15
  54. sky/dashboard/out/_next/static/chunks/578-24f35aa98d38d638.js +0 -6
  55. sky/dashboard/out/_next/static/chunks/627-31b701e69f52db0c.js +0 -1
  56. sky/dashboard/out/_next/static/chunks/843-e35d71cf1c7f706e.js +0 -11
  57. sky/dashboard/out/_next/static/chunks/990-f85643b521f7ca65.js +0 -1
  58. sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]/[job]-339b59921ccfe266.js +0 -1
  59. sky/dashboard/out/_next/static/chunks/pages/config-72b8c6c2edfd0e39.js +0 -6
  60. sky/dashboard/out/_next/static/chunks/pages/infra-1521baab6992916b.js +0 -1
  61. sky/dashboard/out/_next/static/chunks/pages/jobs/[job]-4d913940b4fa6f5a.js +0 -1
  62. sky/dashboard/out/_next/static/chunks/pages/users-9900af52acf8648d.js +0 -1
  63. sky/dashboard/out/_next/static/chunks/pages/workspaces-72330c4d0fc9a4a2.js +0 -1
  64. sky/dashboard/out/_next/static/css/6a1c0d711a4bdaf1.css +0 -3
  65. /sky/dashboard/out/_next/static/{D5bjIfl4Ob3SV3LJz3CO0 → Mx1iAbDQn1jMHh3UHmK3R}/_ssgManifest.js +0 -0
  66. /sky/dashboard/out/_next/static/chunks/pages/{_app-3985f074c163a856.js → _app-a631df412d8172de.js} +0 -0
  67. {skypilot_nightly-1.0.0.dev20250527.dist-info → skypilot_nightly-1.0.0.dev20250528.dist-info}/WHEEL +0 -0
  68. {skypilot_nightly-1.0.0.dev20250527.dist-info → skypilot_nightly-1.0.0.dev20250528.dist-info}/entry_points.txt +0 -0
  69. {skypilot_nightly-1.0.0.dev20250527.dist-info → skypilot_nightly-1.0.0.dev20250528.dist-info}/licenses/LICENSE +0 -0
  70. {skypilot_nightly-1.0.0.dev20250527.dist-info → skypilot_nightly-1.0.0.dev20250528.dist-info}/top_level.txt +0 -0
sky/global_user_state.py CHANGED
@@ -10,6 +10,7 @@ import json
  import os
  import pathlib
  import pickle
+ import re
  import time
  import typing
  from typing import Any, Dict, List, Optional, Set, Tuple
@@ -18,6 +19,7 @@ import uuid
  import sqlalchemy
  from sqlalchemy import exc as sqlalchemy_exc
  from sqlalchemy import orm
+ from sqlalchemy.dialects import postgresql
  from sqlalchemy.dialects import sqlite
  from sqlalchemy.ext import declarative

@@ -43,7 +45,14 @@ _ENABLED_CLOUDS_KEY_PREFIX = 'enabled_clouds_'
  _DB_PATH = os.path.expanduser('~/.sky/state.db')
  pathlib.Path(_DB_PATH).parents[0].mkdir(parents=True, exist_ok=True)

- _SQLALCHEMY_ENGINE = sqlalchemy.create_engine(f'sqlite:///{_DB_PATH}')
+ if os.environ.get(constants.SKYPILOT_API_SERVER_DB_URL_ENV_VAR):
+     # If SKYPILOT_API_SERVER_DB_URL_ENV_VAR is set, use it as the database URI.
+     logger.debug(
+         f'using db URI from {constants.SKYPILOT_API_SERVER_DB_URL_ENV_VAR}')
+     _SQLALCHEMY_ENGINE = sqlalchemy.create_engine(
+         os.environ.get(constants.SKYPILOT_API_SERVER_DB_URL_ENV_VAR))
+ else:
+     _SQLALCHEMY_ENGINE = sqlalchemy.create_engine('sqlite:///' + _DB_PATH)

  Base = declarative.declarative_base()

@@ -125,6 +134,26 @@ cluster_history_table = sqlalchemy.Table(
  )


+ def _glob_to_similar(glob_pattern):
+     """Converts a glob pattern to a PostgreSQL LIKE pattern."""
+
+     # Escape special LIKE characters that are not special in glob
+     glob_pattern = glob_pattern.replace('%', '\\%').replace('_', '\\_')
+
+     # Convert glob wildcards to LIKE wildcards
+     like_pattern = glob_pattern.replace('*', '%').replace('?', '_')
+
+     # Handle character classes, including negation
+     def replace_char_class(match):
+         group = match.group(0)
+         if group.startswith('[!'):
+             return '[^' + group[2:-1] + ']'
+         return group
+
+     like_pattern = re.sub(r'\[(!)?.*?\]', replace_char_class, like_pattern)
+     return like_pattern
+
+
  def create_table():
      # Enable WAL mode to avoid locking issues.
      # See: issue #1441 and PR #1509
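For reference, the conversion above behaves roughly as follows. This is an illustrative check, not a test shipped with the package; `_glob_to_similar` is a private helper and may change without notice:

```python
# Illustrative expectations for the glob-to-SIMILAR-TO conversion.
from sky.global_user_state import _glob_to_similar

assert _glob_to_similar('my-cluster-*') == 'my-cluster-%'   # * -> %
assert _glob_to_similar('node-?') == 'node-_'               # ? -> _
assert _glob_to_similar('gpu[!0-3]') == 'gpu[^0-3]'         # negated char class
assert _glob_to_similar('100%_done*') == '100\\%\\_done%'   # escape % and _ first
```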
@@ -152,31 +181,52 @@ def create_table():
      # the latest version of SkyPilot.
      with orm.Session(_SQLALCHEMY_ENGINE) as session:
          # Add autostop column to clusters table
-         db_utils.add_column_to_table_sqlalchemy(session, 'clusters', 'autostop',
-             'INTEGER DEFAULT -1')
+         db_utils.add_column_to_table_sqlalchemy(session,
+             'clusters',
+             'autostop',
+             sqlalchemy.Integer(),
+             default_statement='DEFAULT -1')

-         db_utils.add_column_to_table_sqlalchemy(session, 'clusters', 'metadata',
-             'TEXT DEFAULT \'{}\'')
+         db_utils.add_column_to_table_sqlalchemy(
+             session,
+             'clusters',
+             'metadata',
+             sqlalchemy.Text(),
+             default_statement='DEFAULT \'{}\'')

-         db_utils.add_column_to_table_sqlalchemy(session, 'clusters', 'to_down',
-             'INTEGER DEFAULT 0')
+         db_utils.add_column_to_table_sqlalchemy(session,
+             'clusters',
+             'to_down',
+             sqlalchemy.Integer(),
+             default_statement='DEFAULT 0')

          # The cloud identity that created the cluster.
-         db_utils.add_column_to_table_sqlalchemy(session, 'clusters', 'owner',
-             'TEXT')
+         db_utils.add_column_to_table_sqlalchemy(
+             session,
+             'clusters',
+             'owner',
+             sqlalchemy.Text(),
+             default_statement='DEFAULT NULL')

-         db_utils.add_column_to_table_sqlalchemy(session, 'clusters',
-             'cluster_hash',
-             'TEXT DEFAULT null')
+         db_utils.add_column_to_table_sqlalchemy(
+             session,
+             'clusters',
+             'cluster_hash',
+             sqlalchemy.Text(),
+             default_statement='DEFAULT NULL')

-         db_utils.add_column_to_table_sqlalchemy(session, 'clusters',
-             'storage_mounts_metadata',
-             'BLOB DEFAULT null')
+         db_utils.add_column_to_table_sqlalchemy(
+             session,
+             'clusters',
+             'storage_mounts_metadata',
+             sqlalchemy.LargeBinary(),
+             default_statement='DEFAULT NULL')
          db_utils.add_column_to_table_sqlalchemy(
              session,
              'clusters',
              'cluster_ever_up',
-             'INTEGER DEFAULT 0',
+             sqlalchemy.Integer(),
+             default_statement='DEFAULT 0',
              # Set the value to 1 so that all the existing clusters before #2977
              # are considered as ever up, i.e:
              # existing cluster's default (null) -> 1;
@@ -185,28 +235,39 @@ def create_table():
              # clusters were never really UP, setting it to 1 means they won't be
              # auto-deleted during any failover.
              value_to_replace_existing_entries=1)
-         db_utils.add_column_to_table_sqlalchemy(session, 'clusters',
-             'status_updated_at',
-             'INTEGER DEFAULT null')
          db_utils.add_column_to_table_sqlalchemy(
              session,
              'clusters',
-             'user_hash',
-             'TEXT DEFAULT null',
+             'status_updated_at',
+             sqlalchemy.Integer(),
+             default_statement='DEFAULT NULL')
+         db_utils.add_column_to_table_sqlalchemy(
+             session,
+             'clusters',
+             'user_hash',
+             sqlalchemy.Text(),
+             default_statement='DEFAULT NULL',
              value_to_replace_existing_entries=common_utils.get_user_hash())
-         db_utils.add_column_to_table_sqlalchemy(session, 'clusters',
-             'config_hash',
-             'TEXT DEFAULT null')
+         db_utils.add_column_to_table_sqlalchemy(
+             session,
+             'clusters',
+             'config_hash',
+             sqlalchemy.Text(),
+             default_statement='DEFAULT NULL')

-         db_utils.add_column_to_table_sqlalchemy(session, 'cluster_history',
-             'user_hash',
-             'TEXT DEFAULT null')
+         db_utils.add_column_to_table_sqlalchemy(
+             session,
+             'cluster_history',
+             'user_hash',
+             sqlalchemy.Text(),
+             default_statement='DEFAULT NULL')

          db_utils.add_column_to_table_sqlalchemy(
              session,
              'clusters',
              'workspace',
-             'TEXT DEFAULT \'default\'',
+             sqlalchemy.Text(),
+             default_statement='DEFAULT \'default\'',
              value_to_replace_existing_entries=constants.
                  SKYPILOT_DEFAULT_WORKSPACE)
          session.commit()
@@ -223,20 +284,18 @@ def add_or_update_user(user: models.User):
      with orm.Session(_SQLALCHEMY_ENGINE) as session:
          if (_SQLALCHEMY_ENGINE.dialect.name ==
                  db_utils.SQLAlchemyDialect.SQLITE.value):
-             insert_stmnt = sqlite.insert(user_table).values(id=user.id,
-                 name=user.name)
-             do_update_stmt = insert_stmnt.on_conflict_do_update(
-                 index_elements=[user_table.c.id],
-                 set_={user_table.c.name: user.name})
-             session.execute(do_update_stmt)
+             insert_func = sqlite.insert
          elif (_SQLALCHEMY_ENGINE.dialect.name ==
                db_utils.SQLAlchemyDialect.POSTGRESQL.value):
-             # TODO(syang) support postgres dialect
-             session.rollback()
-             raise ValueError('Unsupported database dialect')
+             insert_func = postgresql.insert
          else:
-             session.rollback()
              raise ValueError('Unsupported database dialect')
+         insert_stmnt = insert_func(user_table).values(id=user.id,
+             name=user.name)
+         do_update_stmt = insert_stmnt.on_conflict_do_update(
+             index_elements=[user_table.c.id],
+             set_={user_table.c.name: user.name})
+         session.execute(do_update_stmt)
          session.commit()

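The same refactor repeats for the cluster, cluster-history, config, and storage tables below: the dialect check now only picks an insert constructor, and a single `on_conflict_do_update()` path performs the upsert. A self-contained sketch of that pattern against a toy table and an in-memory SQLite engine (not SkyPilot's schema):

```python
# Dialect-dispatched upsert sketch; both sqlite.insert and postgresql.insert
# expose on_conflict_do_update() in SQLAlchemy 1.4+.
import sqlalchemy
from sqlalchemy.dialects import postgresql, sqlite

metadata = sqlalchemy.MetaData()
users = sqlalchemy.Table(
    'users', metadata,
    sqlalchemy.Column('id', sqlalchemy.Text, primary_key=True),
    sqlalchemy.Column('name', sqlalchemy.Text))

engine = sqlalchemy.create_engine('sqlite://')  # swap for a postgresql:// URL
metadata.create_all(engine)

insert_func = {'sqlite': sqlite.insert,
               'postgresql': postgresql.insert}[engine.dialect.name]

with engine.begin() as conn:
    stmt = insert_func(users).values(id='u1', name='alice')
    conn.execute(stmt.on_conflict_do_update(index_elements=[users.c.id],
                                            set_={users.c.name: 'alice'}))
```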
 
@@ -348,76 +407,67 @@ def add_or_update_cluster(cluster_name: str,

          if (_SQLALCHEMY_ENGINE.dialect.name ==
                  db_utils.SQLAlchemyDialect.SQLITE.value):
-             insert_stmnt = sqlite.insert(cluster_table).values(
-                 name=cluster_name,
-                 **conditional_values,
-                 handle=handle,
-                 status=status.value,
-                 # set metadata to server default ('{}')
-                 # set owner to server default (null)
-                 cluster_hash=cluster_hash,
-                 # set storage_mounts_metadata to server default (null)
-                 status_updated_at=status_updated_at,
-             )
-             do_update_stmt = insert_stmnt.on_conflict_do_update(
-                 index_elements=[cluster_table.c.name],
-                 set_={
-                     **conditional_values,
-                     cluster_table.c.handle: handle,
-                     cluster_table.c.status: status.value,
-                     # do not update metadata value
-                     # do not update owner value
-                     cluster_table.c.cluster_hash: cluster_hash,
-                     # do not update storage_mounts_metadata
-                     cluster_table.c.status_updated_at: status_updated_at,
-                     # do not update user_hash
-                 })
-             session.execute(do_update_stmt)
+             insert_func = sqlite.insert
          elif (_SQLALCHEMY_ENGINE.dialect.name ==
                db_utils.SQLAlchemyDialect.POSTGRESQL.value):
-             # TODO(syang) support postgres dialect
-             session.rollback()
-             raise ValueError('Unsupported database dialect')
+             insert_func = postgresql.insert
          else:
              session.rollback()
              raise ValueError('Unsupported database dialect')

+         insert_stmnt = insert_func(cluster_table).values(
+             name=cluster_name,
+             **conditional_values,
+             handle=handle,
+             status=status.value,
+             # set metadata to server default ('{}')
+             # set owner to server default (null)
+             cluster_hash=cluster_hash,
+             # set storage_mounts_metadata to server default (null)
+             status_updated_at=status_updated_at,
+         )
+         do_update_stmt = insert_stmnt.on_conflict_do_update(
+             index_elements=[cluster_table.c.name],
+             set_={
+                 **conditional_values,
+                 cluster_table.c.handle: handle,
+                 cluster_table.c.status: status.value,
+                 # do not update metadata value
+                 # do not update owner value
+                 cluster_table.c.cluster_hash: cluster_hash,
+                 # do not update storage_mounts_metadata
+                 cluster_table.c.status_updated_at: status_updated_at,
+                 # do not update user_hash
+             })
+         session.execute(do_update_stmt)
+
          # Modify cluster history table
          launched_nodes = getattr(cluster_handle, 'launched_nodes', None)
          launched_resources = getattr(cluster_handle, 'launched_resources', None)

-         if (_SQLALCHEMY_ENGINE.dialect.name ==
-                 db_utils.SQLAlchemyDialect.SQLITE.value):
-             insert_stmnt = sqlite.insert(cluster_history_table).values(
-                 cluster_hash=cluster_hash,
-                 name=cluster_name,
-                 num_nodes=launched_nodes,
-                 requested_resources=pickle.dumps(requested_resources),
-                 launched_resources=pickle.dumps(launched_resources),
-                 usage_intervals=pickle.dumps(usage_intervals),
-                 user_hash=user_hash)
-             do_update_stmt = insert_stmnt.on_conflict_do_update(
-                 index_elements=[cluster_history_table.c.cluster_hash],
-                 set_={
-                     cluster_history_table.c.name: cluster_name,
-                     cluster_history_table.c.num_nodes: launched_nodes,
-                     cluster_history_table.c.requested_resources:
-                         pickle.dumps(requested_resources),
-                     cluster_history_table.c.launched_resources:
-                         pickle.dumps(launched_resources),
-                     cluster_history_table.c.usage_intervals:
-                         pickle.dumps(usage_intervals),
-                     cluster_history_table.c.user_hash: user_hash
-                 })
-             session.execute(do_update_stmt)
-         elif (_SQLALCHEMY_ENGINE.dialect.name ==
-               db_utils.SQLAlchemyDialect.POSTGRESQL.value):
-             # TODO(syang) support postgres dialect
-             session.rollback()
-             raise ValueError('Unsupported database dialect')
-         else:
-             session.rollback()
-             raise ValueError('Unsupported database dialect')
+         insert_stmnt = insert_func(cluster_history_table).values(
+             cluster_hash=cluster_hash,
+             name=cluster_name,
+             num_nodes=launched_nodes,
+             requested_resources=pickle.dumps(requested_resources),
+             launched_resources=pickle.dumps(launched_resources),
+             usage_intervals=pickle.dumps(usage_intervals),
+             user_hash=user_hash)
+         do_update_stmt = insert_stmnt.on_conflict_do_update(
+             index_elements=[cluster_history_table.c.cluster_hash],
+             set_={
+                 cluster_history_table.c.name: cluster_name,
+                 cluster_history_table.c.num_nodes: launched_nodes,
+                 cluster_history_table.c.requested_resources:
+                     pickle.dumps(requested_resources),
+                 cluster_history_table.c.launched_resources:
+                     pickle.dumps(launched_resources),
+                 cluster_history_table.c.usage_intervals:
+                     pickle.dumps(usage_intervals),
+                 cluster_history_table.c.user_hash: user_hash
+             })
+         session.execute(do_update_stmt)
+
          session.commit()


@@ -504,9 +554,9 @@ def get_glob_cluster_names(cluster_name: str) -> List[str]:
                  cluster_table.c.name.op('GLOB')(cluster_name)).all()
          elif (_SQLALCHEMY_ENGINE.dialect.name ==
                db_utils.SQLAlchemyDialect.POSTGRESQL.value):
-             # TODO(syang) support postgres dialect
-             # postgres does not support GLOB
-             raise ValueError('Unsupported database dialect')
+             rows = session.query(cluster_table).filter(
+                 cluster_table.c.name.op('SIMILAR TO')(
+                     _glob_to_similar(cluster_name))).all()
          else:
              raise ValueError('Unsupported database dialect')
      return [row.name for row in rows]
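SQLite's GLOB operator has no direct Postgres equivalent, so the Postgres branch feeds the converted pattern to SIMILAR TO through SQLAlchemy's generic `op()`. A rough illustration of the SQL this produces; the table and pattern below are stand-ins, not SkyPilot's real query:

```python
# Sketch: compile a SIMILAR TO expression built with .op() to see the SQL.
import sqlalchemy
from sqlalchemy.dialects import postgresql

clusters = sqlalchemy.table('clusters', sqlalchemy.column('name'))
expr = clusters.c.name.op('SIMILAR TO')('my-cluster-%')
print(expr.compile(dialect=postgresql.dialect(),
                   compile_kwargs={'literal_binds': True}))
# clusters.name SIMILAR TO 'my-cluster-%'
```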
@@ -741,6 +791,7 @@ def get_cluster_from_name(
          'config_hash': row.config_hash,
          'workspace': row.workspace,
      }
+
      return record


@@ -849,21 +900,19 @@ def set_enabled_clouds(enabled_clouds: List[str],
      with orm.Session(_SQLALCHEMY_ENGINE) as session:
          if (_SQLALCHEMY_ENGINE.dialect.name ==
                  db_utils.SQLAlchemyDialect.SQLITE.value):
-             insert_stmnt = sqlite.insert(config_table).values(
-                 key=_get_enabled_clouds_key(cloud_capability, workspace),
-                 value=json.dumps(enabled_clouds))
-             do_update_stmt = insert_stmnt.on_conflict_do_update(
-                 index_elements=[config_table.c.key],
-                 set_={config_table.c.value: json.dumps(enabled_clouds)})
-             session.execute(do_update_stmt)
+             insert_func = sqlite.insert
          elif (_SQLALCHEMY_ENGINE.dialect.name ==
                db_utils.SQLAlchemyDialect.POSTGRESQL.value):
-             # TODO(syang) support postgres dialect
-             session.rollback()
-             raise ValueError('Unsupported database dialect')
+             insert_func = postgresql.insert
          else:
-             session.rollback()
              raise ValueError('Unsupported database dialect')
+         insert_stmnt = insert_func(config_table).values(
+             key=_get_enabled_clouds_key(cloud_capability, workspace),
+             value=json.dumps(enabled_clouds))
+         do_update_stmt = insert_stmnt.on_conflict_do_update(
+             index_elements=[config_table.c.key],
+             set_={config_table.c.value: json.dumps(enabled_clouds)})
+         session.execute(do_update_stmt)
          session.commit()


@@ -888,29 +937,27 @@ def add_or_update_storage(storage_name: str,
      with orm.Session(_SQLALCHEMY_ENGINE) as session:
          if (_SQLALCHEMY_ENGINE.dialect.name ==
                  db_utils.SQLAlchemyDialect.SQLITE.value):
-             insert_stmnt = sqlite.insert(storage_table).values(
-                 name=storage_name,
-                 handle=handle,
-                 last_use=last_use,
-                 launched_at=storage_launched_at,
-                 status=storage_status.value)
-             do_update_stmt = insert_stmnt.on_conflict_do_update(
-                 index_elements=[storage_table.c.name],
-                 set_={
-                     storage_table.c.handle: handle,
-                     storage_table.c.last_use: last_use,
-                     storage_table.c.launched_at: storage_launched_at,
-                     storage_table.c.status: storage_status.value
-                 })
-             session.execute(do_update_stmt)
+             insert_func = sqlite.insert
          elif (_SQLALCHEMY_ENGINE.dialect.name ==
                db_utils.SQLAlchemyDialect.POSTGRESQL.value):
-             # TODO(syang) support postgres dialect
-             session.rollback()
-             raise ValueError('Unsupported database dialect')
+             insert_func = postgresql.insert
          else:
-             session.rollback()
              raise ValueError('Unsupported database dialect')
+         insert_stmnt = insert_func(storage_table).values(
+             name=storage_name,
+             handle=handle,
+             last_use=last_use,
+             launched_at=storage_launched_at,
+             status=storage_status.value)
+         do_update_stmt = insert_stmnt.on_conflict_do_update(
+             index_elements=[storage_table.c.name],
+             set_={
+                 storage_table.c.handle: handle,
+                 storage_table.c.last_use: last_use,
+                 storage_table.c.launched_at: storage_launched_at,
+                 storage_table.c.status: storage_status.value
+             })
+         session.execute(do_update_stmt)
          session.commit()


@@ -973,9 +1020,9 @@ def get_glob_storage_name(storage_name: str) -> List[str]:
                  storage_table.c.name.op('GLOB')(storage_name)).all()
          elif (_SQLALCHEMY_ENGINE.dialect.name ==
                db_utils.SQLAlchemyDialect.POSTGRESQL.value):
-             # TODO(syang) support postgres dialect
-             # postgres does not support GLOB
-             raise ValueError('Unsupported database dialect')
+             rows = session.query(storage_table).filter(
+                 storage_table.c.name.op('SIMILAR TO')(
+                     _glob_to_similar(storage_name))).all()
          else:
              raise ValueError('Unsupported database dialect')
      return [row.name for row in rows]
sky/provision/kubernetes/utils.py CHANGED
@@ -1525,7 +1525,7 @@ def is_kubeconfig_exec_auth(
          return False, None

      # Get active context and user from kubeconfig using k8s api
-     all_contexts, current_context = k8s.config.list_kube_config_contexts()
+     all_contexts, current_context = kubernetes.list_kube_config_contexts()
      context_obj = current_context
      if context is not None:
          for c in all_contexts:
@@ -1581,7 +1581,7 @@ def get_current_kube_config_context_name() -> Optional[str]:
      """
      k8s = kubernetes.kubernetes
      try:
-         _, current_context = k8s.config.list_kube_config_contexts()
+         _, current_context = kubernetes.list_kube_config_contexts()
          return current_context['name']
      except k8s.config.config_exception.ConfigException:
          return None
@@ -1617,7 +1617,7 @@ def get_all_kube_context_names() -> List[str]:
      k8s = kubernetes.kubernetes
      context_names = []
      try:
-         all_contexts, _ = k8s.config.list_kube_config_contexts()
+         all_contexts, _ = kubernetes.list_kube_config_contexts()
          # all_contexts will always have at least one context. If kubeconfig
          # does not have any contexts defined, it will raise ConfigException.
          context_names = [context['name'] for context in all_contexts]
@@ -1660,7 +1660,7 @@ def get_kube_config_context_namespace(
              return f.read().strip()
      # If not in-cluster, get the namespace from kubeconfig
      try:
-         contexts, current_context = k8s.config.list_kube_config_contexts()
+         contexts, current_context = kubernetes.list_kube_config_contexts()
          if context_name is None:
              context = current_context
          else:
sky/server/server.py CHANGED
@@ -421,6 +421,10 @@ async def validate(validate_body: payloads.ValidateBody) -> None:
      logger.debug(f'Validating tasks: {validate_body.dag}')

      context.initialize()
+     ctx = context.get()
+     assert ctx is not None
+     # TODO(aylei): generalize this to all requests without a db record.
+     ctx.override_envs(validate_body.env_vars)

      def validate_dag(dag: dag_utils.dag_lib.Dag):
          # TODO: Admin policy may contain arbitrary code, which may be expensive
@@ -1199,13 +1203,10 @@ async def health(request: fastapi.Request) -> Dict[str, Any]:


  @app.websocket('/kubernetes-pod-ssh-proxy')
- async def kubernetes_pod_ssh_proxy(
-         websocket: fastapi.WebSocket,
-         cluster_name_body: payloads.ClusterNameBody = fastapi.Depends()
- ) -> None:
+ async def kubernetes_pod_ssh_proxy(websocket: fastapi.WebSocket,
+                                    cluster_name: str) -> None:
      """Proxies SSH to the Kubernetes pod with websocket."""
      await websocket.accept()
-     cluster_name = cluster_name_body.cluster_name
      logger.info(f'WebSocket connection accepted for cluster: {cluster_name}')

      cluster_records = core.status(cluster_name, all_users=True)
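With the pydantic-body dependency dropped, the bare `cluster_name: str` parameter on the websocket route is resolved from the query string by FastAPI (e.g. `/kubernetes-pod-ssh-proxy?cluster_name=my-cluster`). A minimal standalone sketch of that behavior, not SkyPilot's actual server:

```python
# Toy FastAPI app: simple-typed websocket parameters come from the query string.
import fastapi

app = fastapi.FastAPI()

@app.websocket('/kubernetes-pod-ssh-proxy')
async def ssh_proxy(websocket: fastapi.WebSocket, cluster_name: str) -> None:
    # cluster_name is parsed from ?cluster_name=...; a missing value is
    # rejected by FastAPI's validation before the handler runs.
    await websocket.accept()
    await websocket.send_text(f'connected to {cluster_name}')
    await websocket.close()
```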
sky/setup_files/dependencies.py CHANGED
@@ -57,6 +57,7 @@ install_requires = [
      'httpx',
      'setproctitle',
      'sqlalchemy',
+     'psycopg2-binary',
  ]

  local_ray = [
sky/skylet/constants.py CHANGED
@@ -408,3 +408,7 @@ SKY_USER_FILE_PATH = '~/.sky/generated'
  ENV_VAR_IS_SKYPILOT_SERVER = 'IS_SKYPILOT_SERVER'

  SKYPILOT_DEFAULT_WORKSPACE = 'default'
+
+ # Experimental - may be deprecated in the future without notice.
+ SKYPILOT_API_SERVER_DB_URL_ENV_VAR: str = (
+     f'{SKYPILOT_ENV_VAR_PREFIX}API_SERVER_DB_URL')
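Together with the engine selection added in sky/global_user_state.py above, this constant lets a deployment point the state database at an external server instead of the local SQLite file. A minimal sketch of the same logic outside SkyPilot, assuming SKYPILOT_ENV_VAR_PREFIX expands to 'SKYPILOT_' (so the variable is SKYPILOT_API_SERVER_DB_URL) and using a placeholder Postgres URL:

```python
# Hedged sketch: variable name and URL are illustrative, not quoted from docs.
import os
import sqlalchemy

db_url = os.environ.get('SKYPILOT_API_SERVER_DB_URL')  # e.g. 'postgresql://sky:sky@db-host/skypilot'
if db_url:
    engine = sqlalchemy.create_engine(db_url)
else:
    engine = sqlalchemy.create_engine(
        'sqlite:///' + os.path.expanduser('~/.sky/state.db'))
```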
sky/utils/db_utils.py CHANGED
@@ -88,60 +88,48 @@ def add_column_to_table_sqlalchemy(
      session: 'Session',
      table_name: str,
      column_name: str,
-     column_type: str,
+     column_type: sqlalchemy.types.TypeEngine,
+     default_statement: Optional[str] = None,
      copy_from: Optional[str] = None,
      value_to_replace_existing_entries: Optional[Any] = None,
  ):
      """Add a column to a table."""
-     dialect = session.bind.dialect
-     if dialect.name == SQLAlchemyDialect.SQLITE.value:
-         try:
+     # column type may be different for different dialects.
+     # for example, sqlite uses BLOB for LargeBinary
+     # while postgres uses BYTEA.
+     column_type_str = column_type.compile(dialect=session.bind.dialect)
+     default_statement_str = (f' {default_statement}'
+                              if default_statement is not None else '')
+     try:
+         session.execute(
+             sqlalchemy.text(f'ALTER TABLE {table_name} '
+                             f'ADD COLUMN {column_name} {column_type_str}'
+                             f'{default_statement_str}'))
+         if copy_from is not None:
              session.execute(
-                 sqlalchemy.text(f'ALTER TABLE {table_name} '
-                                 f'ADD COLUMN {column_name} {column_type}'))
-             if copy_from is not None:
-                 session.execute(
-                     sqlalchemy.text(f'UPDATE {table_name} '
-                                     f'SET {column_name} = {copy_from}'))
-             if value_to_replace_existing_entries is not None:
-                 session.execute(
-                     sqlalchemy.text(f'UPDATE {table_name} '
-                                     f'SET {column_name} = :replacement_value '
-                                     f'WHERE {column_name} IS NULL'),
-                     {'replacement_value': value_to_replace_existing_entries})
-         except sqlalchemy_exc.OperationalError as e:
-             if 'duplicate column name' in str(e):
-                 pass
-             else:
-                 raise
-     elif dialect.name == SQLAlchemyDialect.POSTGRESQL.value:
-         # TODO(syang) support postgres dialect
-         session.rollback()
-         raise ValueError('Unsupported database dialect')
-     else:
-         session.rollback()
-         raise ValueError('Unsupported database dialect')
+             sqlalchemy.text(f'UPDATE {table_name} '
+                             f'SET {column_name} = {copy_from}'))
+         if value_to_replace_existing_entries is not None:
+             session.execute(
+                 sqlalchemy.text(f'UPDATE {table_name} '
+                                 f'SET {column_name} = :replacement_value '
+                                 f'WHERE {column_name} IS NULL'),
+                 {'replacement_value': value_to_replace_existing_entries})
+     #sqlite
+     except sqlalchemy_exc.OperationalError as e:
+         if 'duplicate column name' in str(e):
+             pass
+         else:
+             raise
+     #postgressql
+     except sqlalchemy_exc.ProgrammingError as e:
+         if 'already exists' in str(e):
+             pass
+         else:
+             raise
      session.commit()


- def rename_column(
-     cursor: 'sqlite3.Cursor',
-     conn: 'sqlite3.Connection',
-     table_name: str,
-     old_name: str,
-     new_name: str,
- ):
-     """Rename a column in a table."""
-     # NOTE: This only works for sqlite3 >= 3.25.0. Be careful to use this.
-
-     for row in cursor.execute(f'PRAGMA table_info({table_name})'):
-         if row[1] == old_name:
-             cursor.execute(f'ALTER TABLE {table_name} '
-                            f'RENAME COLUMN {old_name} to {new_name}')
-             break
-     conn.commit()
-
-
  class SQLiteConn(threading.local):
      """Thread-local connection to the sqlite3 database."""
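Passing a SQLAlchemy TypeEngine instead of a raw SQL string lets the same migration emit dialect-appropriate DDL. A small sketch of the `compile()` call the new code relies on:

```python
# The same type renders as different DDL per dialect.
import sqlalchemy
from sqlalchemy.dialects import postgresql, sqlite

binary = sqlalchemy.LargeBinary()
print(binary.compile(dialect=sqlite.dialect()))      # BLOB
print(binary.compile(dialect=postgresql.dialect()))  # BYTEA
print(sqlalchemy.Text().compile(dialect=postgresql.dialect()))  # TEXT
```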
 
sky/utils/subprocess_utils.py CHANGED
@@ -246,11 +246,10 @@ def kill_process_with_grace_period(proc: GenericProcess,
          # The child process may have already been terminated.
          return
      except psutil.TimeoutExpired:
-         # Pass to finally to force kill the process.
-         pass
-     finally:
          logger.debug(f'Process {proc.pid} did not terminate after '
                       f'{grace_period} seconds')
+         # Continue to finally to force kill the process.
+     finally:
          # Attempt to force kill if the normal termination fails
          if not force:
              logger.debug(f'Force killing process {proc.pid}')
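The control flow here is the usual terminate-then-kill pattern: on TimeoutExpired, log and fall through to finally, which force-kills. A generic psutil sketch of the same flow (standalone and POSIX-only because of the sleep command; not SkyPilot's helper):

```python
# Terminate gracefully, then force-kill if the grace period expires.
import subprocess
import psutil

child = subprocess.Popen(['sleep', '60'])
proc = psutil.Process(child.pid)
grace_period = 5
try:
    proc.terminate()
    proc.wait(timeout=grace_period)
except psutil.TimeoutExpired:
    print(f'Process {proc.pid} did not terminate after {grace_period} seconds')
finally:
    if proc.is_running():
        proc.kill()
```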
{skypilot_nightly-1.0.0.dev20250527.dist-info → skypilot_nightly-1.0.0.dev20250528.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: skypilot-nightly
- Version: 1.0.0.dev20250527
+ Version: 1.0.0.dev20250528
  Summary: SkyPilot: Run AI on Any Infra — Unified, Faster, Cheaper.
  Author: SkyPilot Team
  License: Apache 2.0
@@ -48,6 +48,7 @@ Requires-Dist: aiofiles
  Requires-Dist: httpx
  Requires-Dist: setproctitle
  Requires-Dist: sqlalchemy
+ Requires-Dist: psycopg2-binary
  Provides-Extra: aws
  Requires-Dist: awscli>=1.27.10; extra == "aws"
  Requires-Dist: botocore>=1.29.10; extra == "aws"