skypilot-nightly 1.0.0.dev20250509__py3-none-any.whl → 1.0.0.dev20251107__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of skypilot-nightly might be problematic.

Files changed (512)
  1. sky/__init__.py +22 -6
  2. sky/adaptors/aws.py +25 -7
  3. sky/adaptors/common.py +24 -1
  4. sky/adaptors/coreweave.py +278 -0
  5. sky/adaptors/do.py +8 -2
  6. sky/adaptors/hyperbolic.py +8 -0
  7. sky/adaptors/kubernetes.py +149 -18
  8. sky/adaptors/nebius.py +170 -17
  9. sky/adaptors/primeintellect.py +1 -0
  10. sky/adaptors/runpod.py +68 -0
  11. sky/adaptors/seeweb.py +167 -0
  12. sky/adaptors/shadeform.py +89 -0
  13. sky/admin_policy.py +187 -4
  14. sky/authentication.py +179 -225
  15. sky/backends/__init__.py +4 -2
  16. sky/backends/backend.py +22 -9
  17. sky/backends/backend_utils.py +1299 -380
  18. sky/backends/cloud_vm_ray_backend.py +1715 -518
  19. sky/backends/docker_utils.py +1 -1
  20. sky/backends/local_docker_backend.py +11 -6
  21. sky/backends/wheel_utils.py +37 -9
  22. sky/{clouds/service_catalog → catalog}/__init__.py +21 -19
  23. sky/{clouds/service_catalog → catalog}/aws_catalog.py +27 -8
  24. sky/{clouds/service_catalog → catalog}/azure_catalog.py +10 -7
  25. sky/{clouds/service_catalog → catalog}/common.py +89 -48
  26. sky/{clouds/service_catalog → catalog}/cudo_catalog.py +8 -5
  27. sky/{clouds/service_catalog → catalog}/data_fetchers/analyze.py +1 -1
  28. sky/{clouds/service_catalog → catalog}/data_fetchers/fetch_aws.py +30 -40
  29. sky/{clouds/service_catalog → catalog}/data_fetchers/fetch_cudo.py +38 -38
  30. sky/{clouds/service_catalog → catalog}/data_fetchers/fetch_gcp.py +42 -15
  31. sky/catalog/data_fetchers/fetch_hyperbolic.py +136 -0
  32. sky/{clouds/service_catalog → catalog}/data_fetchers/fetch_lambda_cloud.py +1 -0
  33. sky/catalog/data_fetchers/fetch_nebius.py +335 -0
  34. sky/catalog/data_fetchers/fetch_runpod.py +698 -0
  35. sky/catalog/data_fetchers/fetch_seeweb.py +329 -0
  36. sky/catalog/data_fetchers/fetch_shadeform.py +142 -0
  37. sky/{clouds/service_catalog → catalog}/data_fetchers/fetch_vast.py +1 -1
  38. sky/{clouds/service_catalog → catalog}/data_fetchers/fetch_vsphere.py +1 -1
  39. sky/{clouds/service_catalog → catalog}/do_catalog.py +5 -2
  40. sky/{clouds/service_catalog → catalog}/fluidstack_catalog.py +6 -3
  41. sky/{clouds/service_catalog → catalog}/gcp_catalog.py +41 -15
  42. sky/catalog/hyperbolic_catalog.py +136 -0
  43. sky/{clouds/service_catalog → catalog}/ibm_catalog.py +9 -6
  44. sky/{clouds/service_catalog → catalog}/kubernetes_catalog.py +36 -24
  45. sky/{clouds/service_catalog → catalog}/lambda_catalog.py +9 -6
  46. sky/{clouds/service_catalog → catalog}/nebius_catalog.py +9 -7
  47. sky/{clouds/service_catalog → catalog}/oci_catalog.py +9 -6
  48. sky/{clouds/service_catalog → catalog}/paperspace_catalog.py +5 -2
  49. sky/catalog/primeintellect_catalog.py +95 -0
  50. sky/{clouds/service_catalog → catalog}/runpod_catalog.py +11 -4
  51. sky/{clouds/service_catalog → catalog}/scp_catalog.py +9 -6
  52. sky/catalog/seeweb_catalog.py +184 -0
  53. sky/catalog/shadeform_catalog.py +165 -0
  54. sky/catalog/ssh_catalog.py +167 -0
  55. sky/{clouds/service_catalog → catalog}/vast_catalog.py +6 -3
  56. sky/{clouds/service_catalog → catalog}/vsphere_catalog.py +5 -2
  57. sky/check.py +491 -203
  58. sky/cli.py +5 -6005
  59. sky/client/{cli.py → cli/command.py} +2477 -1885
  60. sky/client/cli/deprecation_utils.py +99 -0
  61. sky/client/cli/flags.py +359 -0
  62. sky/client/cli/table_utils.py +320 -0
  63. sky/client/common.py +70 -32
  64. sky/client/oauth.py +82 -0
  65. sky/client/sdk.py +1203 -297
  66. sky/client/sdk_async.py +833 -0
  67. sky/client/service_account_auth.py +47 -0
  68. sky/cloud_stores.py +73 -0
  69. sky/clouds/__init__.py +13 -0
  70. sky/clouds/aws.py +358 -93
  71. sky/clouds/azure.py +105 -83
  72. sky/clouds/cloud.py +127 -36
  73. sky/clouds/cudo.py +68 -50
  74. sky/clouds/do.py +66 -48
  75. sky/clouds/fluidstack.py +63 -44
  76. sky/clouds/gcp.py +339 -110
  77. sky/clouds/hyperbolic.py +293 -0
  78. sky/clouds/ibm.py +70 -49
  79. sky/clouds/kubernetes.py +563 -162
  80. sky/clouds/lambda_cloud.py +74 -54
  81. sky/clouds/nebius.py +206 -80
  82. sky/clouds/oci.py +88 -66
  83. sky/clouds/paperspace.py +61 -44
  84. sky/clouds/primeintellect.py +317 -0
  85. sky/clouds/runpod.py +164 -74
  86. sky/clouds/scp.py +89 -83
  87. sky/clouds/seeweb.py +466 -0
  88. sky/clouds/shadeform.py +400 -0
  89. sky/clouds/ssh.py +263 -0
  90. sky/clouds/utils/aws_utils.py +10 -4
  91. sky/clouds/utils/gcp_utils.py +87 -11
  92. sky/clouds/utils/oci_utils.py +38 -14
  93. sky/clouds/utils/scp_utils.py +177 -124
  94. sky/clouds/vast.py +99 -77
  95. sky/clouds/vsphere.py +51 -40
  96. sky/core.py +349 -139
  97. sky/dag.py +15 -0
  98. sky/dashboard/out/404.html +1 -1
  99. sky/dashboard/out/_next/static/chunks/1141-e6aa9ab418717c59.js +11 -0
  100. sky/dashboard/out/_next/static/chunks/1272-1ef0bf0237faccdb.js +1 -0
  101. sky/dashboard/out/_next/static/chunks/1871-74503c8e80fd253b.js +6 -0
  102. sky/dashboard/out/_next/static/chunks/2260-7703229c33c5ebd5.js +1 -0
  103. sky/dashboard/out/_next/static/chunks/2350.fab69e61bac57b23.js +1 -0
  104. sky/dashboard/out/_next/static/chunks/2369.fc20f0c2c8ed9fe7.js +15 -0
  105. sky/dashboard/out/_next/static/chunks/2755.fff53c4a3fcae910.js +26 -0
  106. sky/dashboard/out/_next/static/chunks/3294.72362fa129305b19.js +1 -0
  107. sky/dashboard/out/_next/static/chunks/3785.ad6adaa2a0fa9768.js +1 -0
  108. sky/dashboard/out/_next/static/chunks/3850-ff4a9a69d978632b.js +1 -0
  109. sky/dashboard/out/_next/static/chunks/3937.210053269f121201.js +1 -0
  110. sky/dashboard/out/_next/static/chunks/4725.a830b5c9e7867c92.js +1 -0
  111. sky/dashboard/out/_next/static/chunks/4937.a2baa2df5572a276.js +15 -0
  112. sky/dashboard/out/_next/static/chunks/5739-d67458fcb1386c92.js +8 -0
  113. sky/dashboard/out/_next/static/chunks/6130-2be46d70a38f1e82.js +1 -0
  114. sky/dashboard/out/_next/static/chunks/616-3d59f75e2ccf9321.js +39 -0
  115. sky/dashboard/out/_next/static/chunks/6212-7bd06f60ba693125.js +13 -0
  116. sky/dashboard/out/_next/static/chunks/6601-06114c982db410b6.js +1 -0
  117. sky/dashboard/out/_next/static/chunks/6856-ef8ba11f96d8c4a3.js +1 -0
  118. sky/dashboard/out/_next/static/chunks/6989-01359c57e018caa4.js +1 -0
  119. sky/dashboard/out/_next/static/chunks/6990-32b6e2d3822301fa.js +1 -0
  120. sky/dashboard/out/_next/static/chunks/7359-c8d04e06886000b3.js +30 -0
  121. sky/dashboard/out/_next/static/chunks/7411-b15471acd2cba716.js +41 -0
  122. sky/dashboard/out/_next/static/chunks/7615-3301e838e5f25772.js +1 -0
  123. sky/dashboard/out/_next/static/chunks/8640.5b9475a2d18c5416.js +16 -0
  124. sky/dashboard/out/_next/static/chunks/8969-1e4613c651bf4051.js +1 -0
  125. sky/dashboard/out/_next/static/chunks/9025.fa408f3242e9028d.js +6 -0
  126. sky/dashboard/out/_next/static/chunks/9353-cff34f7e773b2e2b.js +1 -0
  127. sky/dashboard/out/_next/static/chunks/9360.7310982cf5a0dc79.js +31 -0
  128. sky/dashboard/out/_next/static/chunks/9847.3aaca6bb33455140.js +30 -0
  129. sky/dashboard/out/_next/static/chunks/fd9d1056-86323a29a8f7e46a.js +1 -0
  130. sky/dashboard/out/_next/static/chunks/framework-cf60a09ccd051a10.js +33 -0
  131. sky/dashboard/out/_next/static/chunks/main-app-587214043926b3cc.js +1 -0
  132. sky/dashboard/out/_next/static/chunks/main-f15ccb73239a3bf1.js +1 -0
  133. sky/dashboard/out/_next/static/chunks/pages/_app-bde01e4a2beec258.js +34 -0
  134. sky/dashboard/out/_next/static/chunks/pages/_error-c66a4e8afc46f17b.js +1 -0
  135. sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]/[job]-c736ead69c2d86ec.js +16 -0
  136. sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]-a37d2063af475a1c.js +1 -0
  137. sky/dashboard/out/_next/static/chunks/pages/clusters-d44859594e6f8064.js +1 -0
  138. sky/dashboard/out/_next/static/chunks/pages/config-dfb9bf07b13045f4.js +1 -0
  139. sky/dashboard/out/_next/static/chunks/pages/index-444f1804401f04ea.js +1 -0
  140. sky/dashboard/out/_next/static/chunks/pages/infra/[context]-c0b5935149902e6f.js +1 -0
  141. sky/dashboard/out/_next/static/chunks/pages/infra-aed0ea19df7cf961.js +1 -0
  142. sky/dashboard/out/_next/static/chunks/pages/jobs/[job]-5796e8d6aea291a0.js +16 -0
  143. sky/dashboard/out/_next/static/chunks/pages/jobs/pools/[pool]-6edeb7d06032adfc.js +21 -0
  144. sky/dashboard/out/_next/static/chunks/pages/jobs-479dde13399cf270.js +1 -0
  145. sky/dashboard/out/_next/static/chunks/pages/users-5ab3b907622cf0fe.js +1 -0
  146. sky/dashboard/out/_next/static/chunks/pages/volumes-b84b948ff357c43e.js +1 -0
  147. sky/dashboard/out/_next/static/chunks/pages/workspace/new-3f88a1c7e86a3f86.js +1 -0
  148. sky/dashboard/out/_next/static/chunks/pages/workspaces/[name]-c5a3eeee1c218af1.js +1 -0
  149. sky/dashboard/out/_next/static/chunks/pages/workspaces-22b23febb3e89ce1.js +1 -0
  150. sky/dashboard/out/_next/static/chunks/webpack-2679be77fc08a2f8.js +1 -0
  151. sky/dashboard/out/_next/static/css/0748ce22df867032.css +3 -0
  152. sky/dashboard/out/_next/static/zB0ed6ge_W1MDszVHhijS/_buildManifest.js +1 -0
  153. sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
  154. sky/dashboard/out/clusters/[cluster].html +1 -1
  155. sky/dashboard/out/clusters.html +1 -1
  156. sky/dashboard/out/config.html +1 -0
  157. sky/dashboard/out/index.html +1 -1
  158. sky/dashboard/out/infra/[context].html +1 -0
  159. sky/dashboard/out/infra.html +1 -0
  160. sky/dashboard/out/jobs/[job].html +1 -1
  161. sky/dashboard/out/jobs/pools/[pool].html +1 -0
  162. sky/dashboard/out/jobs.html +1 -1
  163. sky/dashboard/out/users.html +1 -0
  164. sky/dashboard/out/volumes.html +1 -0
  165. sky/dashboard/out/workspace/new.html +1 -0
  166. sky/dashboard/out/workspaces/[name].html +1 -0
  167. sky/dashboard/out/workspaces.html +1 -0
  168. sky/data/data_utils.py +137 -1
  169. sky/data/mounting_utils.py +269 -84
  170. sky/data/storage.py +1451 -1807
  171. sky/data/storage_utils.py +43 -57
  172. sky/exceptions.py +132 -2
  173. sky/execution.py +206 -63
  174. sky/global_user_state.py +2374 -586
  175. sky/jobs/__init__.py +5 -0
  176. sky/jobs/client/sdk.py +242 -65
  177. sky/jobs/client/sdk_async.py +143 -0
  178. sky/jobs/constants.py +9 -8
  179. sky/jobs/controller.py +839 -277
  180. sky/jobs/file_content_utils.py +80 -0
  181. sky/jobs/log_gc.py +201 -0
  182. sky/jobs/recovery_strategy.py +398 -152
  183. sky/jobs/scheduler.py +315 -189
  184. sky/jobs/server/core.py +829 -255
  185. sky/jobs/server/server.py +156 -115
  186. sky/jobs/server/utils.py +136 -0
  187. sky/jobs/state.py +2092 -701
  188. sky/jobs/utils.py +1242 -160
  189. sky/logs/__init__.py +21 -0
  190. sky/logs/agent.py +108 -0
  191. sky/logs/aws.py +243 -0
  192. sky/logs/gcp.py +91 -0
  193. sky/metrics/__init__.py +0 -0
  194. sky/metrics/utils.py +443 -0
  195. sky/models.py +78 -1
  196. sky/optimizer.py +164 -70
  197. sky/provision/__init__.py +90 -4
  198. sky/provision/aws/config.py +147 -26
  199. sky/provision/aws/instance.py +135 -50
  200. sky/provision/azure/instance.py +10 -5
  201. sky/provision/common.py +13 -1
  202. sky/provision/cudo/cudo_machine_type.py +1 -1
  203. sky/provision/cudo/cudo_utils.py +14 -8
  204. sky/provision/cudo/cudo_wrapper.py +72 -71
  205. sky/provision/cudo/instance.py +10 -6
  206. sky/provision/do/instance.py +10 -6
  207. sky/provision/do/utils.py +4 -3
  208. sky/provision/docker_utils.py +114 -23
  209. sky/provision/fluidstack/instance.py +13 -8
  210. sky/provision/gcp/__init__.py +1 -0
  211. sky/provision/gcp/config.py +301 -19
  212. sky/provision/gcp/constants.py +218 -0
  213. sky/provision/gcp/instance.py +36 -8
  214. sky/provision/gcp/instance_utils.py +18 -4
  215. sky/provision/gcp/volume_utils.py +247 -0
  216. sky/provision/hyperbolic/__init__.py +12 -0
  217. sky/provision/hyperbolic/config.py +10 -0
  218. sky/provision/hyperbolic/instance.py +437 -0
  219. sky/provision/hyperbolic/utils.py +373 -0
  220. sky/provision/instance_setup.py +93 -14
  221. sky/provision/kubernetes/__init__.py +5 -0
  222. sky/provision/kubernetes/config.py +9 -52
  223. sky/provision/kubernetes/constants.py +17 -0
  224. sky/provision/kubernetes/instance.py +789 -247
  225. sky/provision/kubernetes/manifests/fusermount-server-daemonset.yaml +1 -2
  226. sky/provision/kubernetes/network.py +27 -17
  227. sky/provision/kubernetes/network_utils.py +40 -43
  228. sky/provision/kubernetes/utils.py +1192 -531
  229. sky/provision/kubernetes/volume.py +282 -0
  230. sky/provision/lambda_cloud/instance.py +22 -16
  231. sky/provision/nebius/constants.py +50 -0
  232. sky/provision/nebius/instance.py +19 -6
  233. sky/provision/nebius/utils.py +196 -91
  234. sky/provision/oci/instance.py +10 -5
  235. sky/provision/paperspace/instance.py +10 -7
  236. sky/provision/paperspace/utils.py +1 -1
  237. sky/provision/primeintellect/__init__.py +10 -0
  238. sky/provision/primeintellect/config.py +11 -0
  239. sky/provision/primeintellect/instance.py +454 -0
  240. sky/provision/primeintellect/utils.py +398 -0
  241. sky/provision/provisioner.py +110 -36
  242. sky/provision/runpod/__init__.py +5 -0
  243. sky/provision/runpod/instance.py +27 -6
  244. sky/provision/runpod/utils.py +51 -18
  245. sky/provision/runpod/volume.py +180 -0
  246. sky/provision/scp/__init__.py +15 -0
  247. sky/provision/scp/config.py +93 -0
  248. sky/provision/scp/instance.py +531 -0
  249. sky/provision/seeweb/__init__.py +11 -0
  250. sky/provision/seeweb/config.py +13 -0
  251. sky/provision/seeweb/instance.py +807 -0
  252. sky/provision/shadeform/__init__.py +11 -0
  253. sky/provision/shadeform/config.py +12 -0
  254. sky/provision/shadeform/instance.py +351 -0
  255. sky/provision/shadeform/shadeform_utils.py +83 -0
  256. sky/provision/ssh/__init__.py +18 -0
  257. sky/provision/vast/instance.py +13 -8
  258. sky/provision/vast/utils.py +10 -7
  259. sky/provision/vsphere/common/vim_utils.py +1 -2
  260. sky/provision/vsphere/instance.py +15 -10
  261. sky/provision/vsphere/vsphere_utils.py +9 -19
  262. sky/py.typed +0 -0
  263. sky/resources.py +844 -118
  264. sky/schemas/__init__.py +0 -0
  265. sky/schemas/api/__init__.py +0 -0
  266. sky/schemas/api/responses.py +225 -0
  267. sky/schemas/db/README +4 -0
  268. sky/schemas/db/env.py +90 -0
  269. sky/schemas/db/global_user_state/001_initial_schema.py +124 -0
  270. sky/schemas/db/global_user_state/002_add_workspace_to_cluster_history.py +35 -0
  271. sky/schemas/db/global_user_state/003_fix_initial_revision.py +61 -0
  272. sky/schemas/db/global_user_state/004_is_managed.py +34 -0
  273. sky/schemas/db/global_user_state/005_cluster_event.py +32 -0
  274. sky/schemas/db/global_user_state/006_provision_log.py +41 -0
  275. sky/schemas/db/global_user_state/007_cluster_event_request_id.py +34 -0
  276. sky/schemas/db/global_user_state/008_skylet_ssh_tunnel_metadata.py +34 -0
  277. sky/schemas/db/global_user_state/009_last_activity_and_launched_at.py +89 -0
  278. sky/schemas/db/global_user_state/010_save_ssh_key.py +66 -0
  279. sky/schemas/db/script.py.mako +28 -0
  280. sky/schemas/db/serve_state/001_initial_schema.py +67 -0
  281. sky/schemas/db/skypilot_config/001_initial_schema.py +30 -0
  282. sky/schemas/db/spot_jobs/001_initial_schema.py +97 -0
  283. sky/schemas/db/spot_jobs/002_cluster_pool.py +42 -0
  284. sky/schemas/db/spot_jobs/003_pool_hash.py +34 -0
  285. sky/schemas/db/spot_jobs/004_job_file_contents.py +42 -0
  286. sky/schemas/db/spot_jobs/005_logs_gc.py +38 -0
  287. sky/schemas/generated/__init__.py +0 -0
  288. sky/schemas/generated/autostopv1_pb2.py +36 -0
  289. sky/schemas/generated/autostopv1_pb2.pyi +43 -0
  290. sky/schemas/generated/autostopv1_pb2_grpc.py +146 -0
  291. sky/schemas/generated/jobsv1_pb2.py +86 -0
  292. sky/schemas/generated/jobsv1_pb2.pyi +254 -0
  293. sky/schemas/generated/jobsv1_pb2_grpc.py +542 -0
  294. sky/schemas/generated/managed_jobsv1_pb2.py +74 -0
  295. sky/schemas/generated/managed_jobsv1_pb2.pyi +278 -0
  296. sky/schemas/generated/managed_jobsv1_pb2_grpc.py +278 -0
  297. sky/schemas/generated/servev1_pb2.py +58 -0
  298. sky/schemas/generated/servev1_pb2.pyi +115 -0
  299. sky/schemas/generated/servev1_pb2_grpc.py +322 -0
  300. sky/serve/autoscalers.py +357 -5
  301. sky/serve/client/impl.py +310 -0
  302. sky/serve/client/sdk.py +47 -139
  303. sky/serve/client/sdk_async.py +130 -0
  304. sky/serve/constants.py +10 -8
  305. sky/serve/controller.py +64 -19
  306. sky/serve/load_balancer.py +106 -60
  307. sky/serve/load_balancing_policies.py +115 -1
  308. sky/serve/replica_managers.py +273 -162
  309. sky/serve/serve_rpc_utils.py +179 -0
  310. sky/serve/serve_state.py +554 -251
  311. sky/serve/serve_utils.py +733 -220
  312. sky/serve/server/core.py +66 -711
  313. sky/serve/server/impl.py +1093 -0
  314. sky/serve/server/server.py +21 -18
  315. sky/serve/service.py +133 -48
  316. sky/serve/service_spec.py +135 -16
  317. sky/serve/spot_placer.py +3 -0
  318. sky/server/auth/__init__.py +0 -0
  319. sky/server/auth/authn.py +50 -0
  320. sky/server/auth/loopback.py +38 -0
  321. sky/server/auth/oauth2_proxy.py +200 -0
  322. sky/server/common.py +475 -181
  323. sky/server/config.py +81 -23
  324. sky/server/constants.py +44 -6
  325. sky/server/daemons.py +229 -0
  326. sky/server/html/token_page.html +185 -0
  327. sky/server/metrics.py +160 -0
  328. sky/server/requests/executor.py +528 -138
  329. sky/server/requests/payloads.py +351 -17
  330. sky/server/requests/preconditions.py +21 -17
  331. sky/server/requests/process.py +112 -29
  332. sky/server/requests/request_names.py +120 -0
  333. sky/server/requests/requests.py +817 -224
  334. sky/server/requests/serializers/decoders.py +82 -31
  335. sky/server/requests/serializers/encoders.py +140 -22
  336. sky/server/requests/threads.py +106 -0
  337. sky/server/rest.py +417 -0
  338. sky/server/server.py +1290 -284
  339. sky/server/state.py +20 -0
  340. sky/server/stream_utils.py +345 -57
  341. sky/server/uvicorn.py +217 -3
  342. sky/server/versions.py +270 -0
  343. sky/setup_files/MANIFEST.in +5 -0
  344. sky/setup_files/alembic.ini +156 -0
  345. sky/setup_files/dependencies.py +136 -31
  346. sky/setup_files/setup.py +44 -42
  347. sky/sky_logging.py +102 -5
  348. sky/skylet/attempt_skylet.py +1 -0
  349. sky/skylet/autostop_lib.py +129 -8
  350. sky/skylet/configs.py +27 -20
  351. sky/skylet/constants.py +171 -19
  352. sky/skylet/events.py +105 -21
  353. sky/skylet/job_lib.py +335 -104
  354. sky/skylet/log_lib.py +297 -18
  355. sky/skylet/log_lib.pyi +44 -1
  356. sky/skylet/ray_patches/__init__.py +17 -3
  357. sky/skylet/ray_patches/autoscaler.py.diff +18 -0
  358. sky/skylet/ray_patches/cli.py.diff +19 -0
  359. sky/skylet/ray_patches/command_runner.py.diff +17 -0
  360. sky/skylet/ray_patches/log_monitor.py.diff +20 -0
  361. sky/skylet/ray_patches/resource_demand_scheduler.py.diff +32 -0
  362. sky/skylet/ray_patches/updater.py.diff +18 -0
  363. sky/skylet/ray_patches/worker.py.diff +41 -0
  364. sky/skylet/services.py +564 -0
  365. sky/skylet/skylet.py +63 -4
  366. sky/skylet/subprocess_daemon.py +103 -29
  367. sky/skypilot_config.py +506 -99
  368. sky/ssh_node_pools/__init__.py +1 -0
  369. sky/ssh_node_pools/core.py +135 -0
  370. sky/ssh_node_pools/server.py +233 -0
  371. sky/task.py +621 -137
  372. sky/templates/aws-ray.yml.j2 +10 -3
  373. sky/templates/azure-ray.yml.j2 +1 -1
  374. sky/templates/do-ray.yml.j2 +1 -1
  375. sky/templates/gcp-ray.yml.j2 +57 -0
  376. sky/templates/hyperbolic-ray.yml.j2 +67 -0
  377. sky/templates/jobs-controller.yaml.j2 +27 -24
  378. sky/templates/kubernetes-loadbalancer.yml.j2 +2 -0
  379. sky/templates/kubernetes-ray.yml.j2 +607 -51
  380. sky/templates/lambda-ray.yml.j2 +1 -1
  381. sky/templates/nebius-ray.yml.j2 +33 -12
  382. sky/templates/paperspace-ray.yml.j2 +1 -1
  383. sky/templates/primeintellect-ray.yml.j2 +71 -0
  384. sky/templates/runpod-ray.yml.j2 +9 -1
  385. sky/templates/scp-ray.yml.j2 +3 -50
  386. sky/templates/seeweb-ray.yml.j2 +108 -0
  387. sky/templates/shadeform-ray.yml.j2 +72 -0
  388. sky/templates/sky-serve-controller.yaml.j2 +22 -2
  389. sky/templates/websocket_proxy.py +178 -18
  390. sky/usage/usage_lib.py +18 -11
  391. sky/users/__init__.py +0 -0
  392. sky/users/model.conf +15 -0
  393. sky/users/permission.py +387 -0
  394. sky/users/rbac.py +121 -0
  395. sky/users/server.py +720 -0
  396. sky/users/token_service.py +218 -0
  397. sky/utils/accelerator_registry.py +34 -5
  398. sky/utils/admin_policy_utils.py +84 -38
  399. sky/utils/annotations.py +16 -5
  400. sky/utils/asyncio_utils.py +78 -0
  401. sky/utils/auth_utils.py +153 -0
  402. sky/utils/benchmark_utils.py +60 -0
  403. sky/utils/cli_utils/status_utils.py +159 -86
  404. sky/utils/cluster_utils.py +31 -9
  405. sky/utils/command_runner.py +354 -68
  406. sky/utils/command_runner.pyi +93 -3
  407. sky/utils/common.py +35 -8
  408. sky/utils/common_utils.py +310 -87
  409. sky/utils/config_utils.py +87 -5
  410. sky/utils/context.py +402 -0
  411. sky/utils/context_utils.py +222 -0
  412. sky/utils/controller_utils.py +264 -89
  413. sky/utils/dag_utils.py +31 -12
  414. sky/utils/db/__init__.py +0 -0
  415. sky/utils/db/db_utils.py +470 -0
  416. sky/utils/db/migration_utils.py +133 -0
  417. sky/utils/directory_utils.py +12 -0
  418. sky/utils/env_options.py +13 -0
  419. sky/utils/git.py +567 -0
  420. sky/utils/git_clone.sh +460 -0
  421. sky/utils/infra_utils.py +195 -0
  422. sky/utils/kubernetes/cleanup-tunnel.sh +62 -0
  423. sky/utils/kubernetes/config_map_utils.py +133 -0
  424. sky/utils/kubernetes/create_cluster.sh +13 -27
  425. sky/utils/kubernetes/delete_cluster.sh +10 -7
  426. sky/utils/kubernetes/deploy_remote_cluster.py +1299 -0
  427. sky/utils/kubernetes/exec_kubeconfig_converter.py +22 -31
  428. sky/utils/kubernetes/generate_kind_config.py +6 -66
  429. sky/utils/kubernetes/generate_kubeconfig.sh +4 -1
  430. sky/utils/kubernetes/gpu_labeler.py +5 -5
  431. sky/utils/kubernetes/kubernetes_deploy_utils.py +354 -47
  432. sky/utils/kubernetes/ssh-tunnel.sh +379 -0
  433. sky/utils/kubernetes/ssh_utils.py +221 -0
  434. sky/utils/kubernetes_enums.py +8 -15
  435. sky/utils/lock_events.py +94 -0
  436. sky/utils/locks.py +368 -0
  437. sky/utils/log_utils.py +300 -6
  438. sky/utils/perf_utils.py +22 -0
  439. sky/utils/resource_checker.py +298 -0
  440. sky/utils/resources_utils.py +249 -32
  441. sky/utils/rich_utils.py +213 -37
  442. sky/utils/schemas.py +905 -147
  443. sky/utils/serialize_utils.py +16 -0
  444. sky/utils/status_lib.py +10 -0
  445. sky/utils/subprocess_utils.py +38 -15
  446. sky/utils/tempstore.py +70 -0
  447. sky/utils/timeline.py +24 -52
  448. sky/utils/ux_utils.py +84 -15
  449. sky/utils/validator.py +11 -1
  450. sky/utils/volume.py +86 -0
  451. sky/utils/yaml_utils.py +111 -0
  452. sky/volumes/__init__.py +13 -0
  453. sky/volumes/client/__init__.py +0 -0
  454. sky/volumes/client/sdk.py +149 -0
  455. sky/volumes/server/__init__.py +0 -0
  456. sky/volumes/server/core.py +258 -0
  457. sky/volumes/server/server.py +122 -0
  458. sky/volumes/volume.py +212 -0
  459. sky/workspaces/__init__.py +0 -0
  460. sky/workspaces/core.py +655 -0
  461. sky/workspaces/server.py +101 -0
  462. sky/workspaces/utils.py +56 -0
  463. skypilot_nightly-1.0.0.dev20251107.dist-info/METADATA +675 -0
  464. skypilot_nightly-1.0.0.dev20251107.dist-info/RECORD +594 -0
  465. {skypilot_nightly-1.0.0.dev20250509.dist-info → skypilot_nightly-1.0.0.dev20251107.dist-info}/WHEEL +1 -1
  466. sky/benchmark/benchmark_state.py +0 -256
  467. sky/benchmark/benchmark_utils.py +0 -641
  468. sky/clouds/service_catalog/constants.py +0 -7
  469. sky/dashboard/out/_next/static/LksQgChY5izXjokL3LcEu/_buildManifest.js +0 -1
  470. sky/dashboard/out/_next/static/chunks/236-f49500b82ad5392d.js +0 -6
  471. sky/dashboard/out/_next/static/chunks/312-c3c8845990db8ffc.js +0 -15
  472. sky/dashboard/out/_next/static/chunks/37-0a572fe0dbb89c4d.js +0 -6
  473. sky/dashboard/out/_next/static/chunks/678-206dddca808e6d16.js +0 -59
  474. sky/dashboard/out/_next/static/chunks/845-0f8017370869e269.js +0 -1
  475. sky/dashboard/out/_next/static/chunks/979-7bf73a4c7cea0f5c.js +0 -1
  476. sky/dashboard/out/_next/static/chunks/fd9d1056-2821b0f0cabcd8bd.js +0 -1
  477. sky/dashboard/out/_next/static/chunks/framework-87d061ee6ed71b28.js +0 -33
  478. sky/dashboard/out/_next/static/chunks/main-app-241eb28595532291.js +0 -1
  479. sky/dashboard/out/_next/static/chunks/main-e0e2335212e72357.js +0 -1
  480. sky/dashboard/out/_next/static/chunks/pages/_app-e6b013bc3f77ad60.js +0 -1
  481. sky/dashboard/out/_next/static/chunks/pages/_error-1be831200e60c5c0.js +0 -1
  482. sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]/[job]-e15db85d0ea1fbe1.js +0 -1
  483. sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]-f383db7389368ea7.js +0 -1
  484. sky/dashboard/out/_next/static/chunks/pages/clusters-a93b93e10b8b074e.js +0 -1
  485. sky/dashboard/out/_next/static/chunks/pages/index-f9f039532ca8cbc4.js +0 -1
  486. sky/dashboard/out/_next/static/chunks/pages/jobs/[job]-03f279c6741fb48b.js +0 -1
  487. sky/dashboard/out/_next/static/chunks/pages/jobs-a75029b67aab6a2e.js +0 -1
  488. sky/dashboard/out/_next/static/chunks/webpack-830f59b8404e96b8.js +0 -1
  489. sky/dashboard/out/_next/static/css/c6933bbb2ce7f4dd.css +0 -3
  490. sky/jobs/dashboard/dashboard.py +0 -223
  491. sky/jobs/dashboard/static/favicon.ico +0 -0
  492. sky/jobs/dashboard/templates/index.html +0 -831
  493. sky/jobs/server/dashboard_utils.py +0 -69
  494. sky/skylet/providers/scp/__init__.py +0 -2
  495. sky/skylet/providers/scp/config.py +0 -149
  496. sky/skylet/providers/scp/node_provider.py +0 -578
  497. sky/templates/kubernetes-ssh-jump.yml.j2 +0 -94
  498. sky/utils/db_utils.py +0 -100
  499. sky/utils/kubernetes/deploy_remote_cluster.sh +0 -308
  500. sky/utils/kubernetes/ssh_jump_lifecycle_manager.py +0 -191
  501. skypilot_nightly-1.0.0.dev20250509.dist-info/METADATA +0 -361
  502. skypilot_nightly-1.0.0.dev20250509.dist-info/RECORD +0 -396
  503. /sky/{clouds/service_catalog → catalog}/config.py +0 -0
  504. /sky/{benchmark → catalog/data_fetchers}/__init__.py +0 -0
  505. /sky/{clouds/service_catalog → catalog}/data_fetchers/fetch_azure.py +0 -0
  506. /sky/{clouds/service_catalog → catalog}/data_fetchers/fetch_fluidstack.py +0 -0
  507. /sky/{clouds/service_catalog → catalog}/data_fetchers/fetch_ibm.py +0 -0
  508. /sky/{clouds/service_catalog/data_fetchers → client/cli}/__init__.py +0 -0
  509. /sky/dashboard/out/_next/static/{LksQgChY5izXjokL3LcEu → zB0ed6ge_W1MDszVHhijS}/_ssgManifest.js +0 -0
  510. {skypilot_nightly-1.0.0.dev20250509.dist-info → skypilot_nightly-1.0.0.dev20251107.dist-info}/entry_points.txt +0 -0
  511. {skypilot_nightly-1.0.0.dev20250509.dist-info → skypilot_nightly-1.0.0.dev20251107.dist-info}/licenses/LICENSE +0 -0
  512. {skypilot_nightly-1.0.0.dev20250509.dist-info → skypilot_nightly-1.0.0.dev20251107.dist-info}/top_level.txt +0 -0
sky/jobs/state.py CHANGED
@@ -1,28 +1,59 @@
 """The database for managed jobs status."""
 # TODO(zhwu): maybe use file based status instead of database, so
 # that we can easily switch to a s3-based storage.
+import asyncio
 import enum
+import functools
+import ipaddress
 import json
-import pathlib
 import sqlite3
+import threading
 import time
 import typing
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Union
+import urllib.parse
 
 import colorama
+import sqlalchemy
+from sqlalchemy import exc as sqlalchemy_exc
+from sqlalchemy import orm
+from sqlalchemy.dialects import postgresql
+from sqlalchemy.dialects import sqlite
+from sqlalchemy.ext import asyncio as sql_async
+from sqlalchemy.ext import declarative
 
 from sky import exceptions
 from sky import sky_logging
+from sky import skypilot_config
+from sky.adaptors import common as adaptors_common
+from sky.skylet import constants
 from sky.utils import common_utils
-from sky.utils import db_utils
+from sky.utils import context_utils
+from sky.utils.db import db_utils
+from sky.utils.db import migration_utils
 
 if typing.TYPE_CHECKING:
-    import sky
+    from sqlalchemy.engine import row
 
-CallbackType = Callable[[str], None]
+    from sky.schemas.generated import managed_jobsv1_pb2
+else:
+    managed_jobsv1_pb2 = adaptors_common.LazyImport(
+        'sky.schemas.generated.managed_jobsv1_pb2')
+
+# Separate callback types for sync and async contexts
+SyncCallbackType = Callable[[str], None]
+AsyncCallbackType = Callable[[str], Awaitable[Any]]
+CallbackType = Union[SyncCallbackType, AsyncCallbackType]
 
 logger = sky_logging.init_logger(__name__)
 
+_SQLALCHEMY_ENGINE: Optional[sqlalchemy.engine.Engine] = None
+_SQLALCHEMY_ENGINE_ASYNC: Optional[sql_async.AsyncEngine] = None
+_SQLALCHEMY_ENGINE_LOCK = threading.Lock()
+
+_DB_RETRY_TIMES = 30
+
+Base = declarative.declarative_base()
 
 # === Database schema ===
 # `spot` table contains all the finest-grained tasks, including all the
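Note: the new CallbackType union means status-transition helpers may now receive either a plain function or a coroutine function. A minimal sketch of how a caller could dispatch on the two (the _invoke_callback helper below is hypothetical, not part of this diff):

import inspect
from typing import Any, Awaitable, Callable, Union

SyncCallbackType = Callable[[str], None]
AsyncCallbackType = Callable[[str], Awaitable[Any]]
CallbackType = Union[SyncCallbackType, AsyncCallbackType]

async def _invoke_callback(cb: CallbackType, status: str) -> None:
    # A sync callback runs to completion in the call itself; an async
    # callback returns an awaitable that still needs to be awaited.
    result = cb(status)
    if inspect.isawaitable(result):
        await result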
@@ -35,122 +66,248 @@ logger = sky_logging.init_logger(__name__)
 # identifier/primary key for all the tasks. We will use `spot_job_id`
 # to identify the job.
 # TODO(zhwu): schema migration may be needed.
-def create_table(cursor, conn):
+
+spot_table = sqlalchemy.Table(
+    'spot',
+    Base.metadata,
+    sqlalchemy.Column('job_id',
+                      sqlalchemy.Integer,
+                      primary_key=True,
+                      autoincrement=True),
+    sqlalchemy.Column('job_name', sqlalchemy.Text),
+    sqlalchemy.Column('resources', sqlalchemy.Text),
+    sqlalchemy.Column('submitted_at', sqlalchemy.Float),
+    sqlalchemy.Column('status', sqlalchemy.Text),
+    sqlalchemy.Column('run_timestamp', sqlalchemy.Text),
+    sqlalchemy.Column('start_at', sqlalchemy.Float, server_default=None),
+    sqlalchemy.Column('end_at', sqlalchemy.Float, server_default=None),
+    sqlalchemy.Column('last_recovered_at',
+                      sqlalchemy.Float,
+                      server_default='-1'),
+    sqlalchemy.Column('recovery_count', sqlalchemy.Integer, server_default='0'),
+    sqlalchemy.Column('job_duration', sqlalchemy.Float, server_default='0'),
+    sqlalchemy.Column('failure_reason', sqlalchemy.Text),
+    sqlalchemy.Column('spot_job_id', sqlalchemy.Integer, index=True),
+    sqlalchemy.Column('task_id', sqlalchemy.Integer, server_default='0'),
+    sqlalchemy.Column('task_name', sqlalchemy.Text),
+    sqlalchemy.Column('specs', sqlalchemy.Text),
+    sqlalchemy.Column('local_log_file', sqlalchemy.Text, server_default=None),
+    sqlalchemy.Column('metadata', sqlalchemy.Text, server_default='{}'),
+    sqlalchemy.Column('logs_cleaned_at', sqlalchemy.Float, server_default=None),
+)
+
+job_info_table = sqlalchemy.Table(
+    'job_info',
+    Base.metadata,
+    sqlalchemy.Column('spot_job_id',
+                      sqlalchemy.Integer,
+                      primary_key=True,
+                      autoincrement=True),
+    sqlalchemy.Column('name', sqlalchemy.Text),
+    sqlalchemy.Column('schedule_state', sqlalchemy.Text),
+    sqlalchemy.Column('controller_pid', sqlalchemy.Integer,
+                      server_default=None),
+    sqlalchemy.Column('dag_yaml_path', sqlalchemy.Text),
+    sqlalchemy.Column('env_file_path', sqlalchemy.Text),
+    sqlalchemy.Column('dag_yaml_content', sqlalchemy.Text, server_default=None),
+    sqlalchemy.Column('env_file_content', sqlalchemy.Text, server_default=None),
+    sqlalchemy.Column('user_hash', sqlalchemy.Text),
+    sqlalchemy.Column('workspace', sqlalchemy.Text, server_default=None),
+    sqlalchemy.Column('priority',
+                      sqlalchemy.Integer,
+                      server_default=str(constants.DEFAULT_PRIORITY)),
+    sqlalchemy.Column('entrypoint', sqlalchemy.Text, server_default=None),
+    sqlalchemy.Column('original_user_yaml_path',
+                      sqlalchemy.Text,
+                      server_default=None),
+    sqlalchemy.Column('original_user_yaml_content',
+                      sqlalchemy.Text,
+                      server_default=None),
+    sqlalchemy.Column('pool', sqlalchemy.Text, server_default=None),
+    sqlalchemy.Column('current_cluster_name',
+                      sqlalchemy.Text,
+                      server_default=None),
+    sqlalchemy.Column('job_id_on_pool_cluster',
+                      sqlalchemy.Integer,
+                      server_default=None),
+    sqlalchemy.Column('pool_hash', sqlalchemy.Text, server_default=None),
+    sqlalchemy.Column('controller_logs_cleaned_at',
+                      sqlalchemy.Float,
+                      server_default=None),
+)
+
+ha_recovery_script_table = sqlalchemy.Table(
+    'ha_recovery_script',
+    Base.metadata,
+    sqlalchemy.Column('job_id', sqlalchemy.Integer, primary_key=True),
+    sqlalchemy.Column('script', sqlalchemy.Text),
+)
+
+
+def create_table(engine: sqlalchemy.engine.Engine):
     # Enable WAL mode to avoid locking issues.
     # See: issue #3863, #1441 and PR #1509
     # https://github.com/microsoft/WSL/issues/2395
     # TODO(romilb): We do not enable WAL for WSL because of known issue in WSL.
     # This may cause the database locked problem from WSL issue #1441.
-    if not common_utils.is_wsl():
+    if (engine.dialect.name == db_utils.SQLAlchemyDialect.SQLITE.value and
+            not common_utils.is_wsl()):
         try:
-            cursor.execute('PRAGMA journal_mode=WAL')
-        except sqlite3.OperationalError as e:
+            with orm.Session(engine) as session:
+                session.execute(sqlalchemy.text('PRAGMA journal_mode=WAL'))
+                session.execute(sqlalchemy.text('PRAGMA synchronous=1'))
+                session.commit()
+        except sqlalchemy_exc.OperationalError as e:
             if 'database is locked' not in str(e):
                 raise
             # If the database is locked, it is OK to continue, as the WAL mode
             # is not critical and is likely to be enabled by other processes.
 
-    cursor.execute("""\
-        CREATE TABLE IF NOT EXISTS spot (
-        job_id INTEGER PRIMARY KEY AUTOINCREMENT,
-        job_name TEXT,
-        resources TEXT,
-        submitted_at FLOAT,
-        status TEXT,
-        run_timestamp TEXT CANDIDATE KEY,
-        start_at FLOAT DEFAULT NULL,
-        end_at FLOAT DEFAULT NULL,
-        last_recovered_at FLOAT DEFAULT -1,
-        recovery_count INTEGER DEFAULT 0,
-        job_duration FLOAT DEFAULT 0,
-        failure_reason TEXT,
-        spot_job_id INTEGER,
-        task_id INTEGER DEFAULT 0,
-        task_name TEXT,
-        specs TEXT,
-        local_log_file TEXT DEFAULT NULL)""")
-    conn.commit()
-
-    db_utils.add_column_to_table(cursor, conn, 'spot', 'failure_reason', 'TEXT')
-    # Create a new column `spot_job_id`, which is the same for tasks of the
-    # same managed job.
-    # The original `job_id` no longer has an actual meaning, but only a legacy
-    # identifier for all tasks in database.
-    db_utils.add_column_to_table(cursor,
-                                 conn,
-                                 'spot',
-                                 'spot_job_id',
-                                 'INTEGER',
-                                 copy_from='job_id')
-    db_utils.add_column_to_table(cursor,
-                                 conn,
-                                 'spot',
-                                 'task_id',
-                                 'INTEGER DEFAULT 0',
-                                 value_to_replace_existing_entries=0)
-    db_utils.add_column_to_table(cursor,
-                                 conn,
-                                 'spot',
-                                 'task_name',
-                                 'TEXT',
-                                 copy_from='job_name')
-
-    # Specs is some useful information about the task, e.g., the
-    # max_restarts_on_errors value. It is stored in JSON format.
-    db_utils.add_column_to_table(cursor,
-                                 conn,
-                                 'spot',
-                                 'specs',
-                                 'TEXT',
-                                 value_to_replace_existing_entries=json.dumps({
-                                     'max_restarts_on_errors': 0,
-                                 }))
-    db_utils.add_column_to_table(cursor, conn, 'spot', 'local_log_file',
-                                 'TEXT DEFAULT NULL')
-
-    # `job_info` contains the mapping from job_id to the job_name, as well as
-    # information used by the scheduler.
-    cursor.execute("""\
-        CREATE TABLE IF NOT EXISTS job_info (
-        spot_job_id INTEGER PRIMARY KEY AUTOINCREMENT,
-        name TEXT,
-        schedule_state TEXT,
-        controller_pid INTEGER DEFAULT NULL,
-        dag_yaml_path TEXT,
-        env_file_path TEXT,
-        user_hash TEXT)""")
-
-    db_utils.add_column_to_table(cursor, conn, 'job_info', 'schedule_state',
-                                 'TEXT')
-
-    db_utils.add_column_to_table(cursor, conn, 'job_info', 'controller_pid',
-                                 'INTEGER DEFAULT NULL')
-
-    db_utils.add_column_to_table(cursor, conn, 'job_info', 'dag_yaml_path',
-                                 'TEXT')
-
-    db_utils.add_column_to_table(cursor, conn, 'job_info', 'env_file_path',
-                                 'TEXT')
-
-    db_utils.add_column_to_table(cursor, conn, 'job_info', 'user_hash', 'TEXT')
-
-    conn.commit()
-
-
-# Module-level connection/cursor; thread-safe as the module is only imported
-# once.
-def _get_db_path() -> str:
-    """Workaround to collapse multi-step Path ops for type checker.
-    Ensures _DB_PATH is str, avoiding Union[Path, str] inference.
-    """
-    path = pathlib.Path('~/.sky/spot_jobs.db')
-    path = path.expanduser().absolute()
-    path.parents[0].mkdir(parents=True, exist_ok=True)
-    return str(path)
+    migration_utils.safe_alembic_upgrade(engine,
+                                         migration_utils.SPOT_JOBS_DB_NAME,
+                                         migration_utils.SPOT_JOBS_VERSION)
+
 
+def force_no_postgres() -> bool:
+    """Force no postgres.
+
+    If the db is localhost on the api server, and we are not in consolidation
+    mode, we must force using sqlite and not using the api server on the jobs
+    controller.
+    """
+    conn_string = skypilot_config.get_nested(('db',), None)
+
+    if conn_string:
+        parsed = urllib.parse.urlparse(conn_string)
+        # it freezes if we use the normal get_consolidation_mode function
+        consolidation_mode = skypilot_config.get_nested(
+            ('jobs', 'controller', 'consolidation_mode'), default_value=False)
+        if ((parsed.hostname == 'localhost' or
+             ipaddress.ip_address(parsed.hostname).is_loopback) and
+                not consolidation_mode):
+            return True
+    return False
+
+
+def initialize_and_get_db_async() -> sql_async.AsyncEngine:
+    global _SQLALCHEMY_ENGINE_ASYNC
+    if _SQLALCHEMY_ENGINE_ASYNC is not None:
+        return _SQLALCHEMY_ENGINE_ASYNC
+    with _SQLALCHEMY_ENGINE_LOCK:
+        if _SQLALCHEMY_ENGINE_ASYNC is not None:
+            return _SQLALCHEMY_ENGINE_ASYNC
+
+        _SQLALCHEMY_ENGINE_ASYNC = db_utils.get_engine('spot_jobs',
+                                                       async_engine=True)
+
+        # to create the table in case an async function gets called first
+        initialize_and_get_db()
+        return _SQLALCHEMY_ENGINE_ASYNC
+
+
+# We wrap the sqlalchemy engine initialization in a thread
+# lock to ensure that multiple threads do not initialize the
+# engine which could result in a rare race condition where
+# a session has already been created with _SQLALCHEMY_ENGINE = e1,
+# and then another thread overwrites _SQLALCHEMY_ENGINE = e2
+# which could result in e1 being garbage collected unexpectedly.
+def initialize_and_get_db() -> sqlalchemy.engine.Engine:
+    global _SQLALCHEMY_ENGINE
+    if _SQLALCHEMY_ENGINE is not None:
+        return _SQLALCHEMY_ENGINE
+
+    with _SQLALCHEMY_ENGINE_LOCK:
+        if _SQLALCHEMY_ENGINE is not None:
+            return _SQLALCHEMY_ENGINE
+        # get an engine to the db
+        engine = db_utils.get_engine('spot_jobs')
+
+        # run migrations if needed
+        create_table(engine)
+
+        # return engine
+        _SQLALCHEMY_ENGINE = engine
+        return _SQLALCHEMY_ENGINE
+
+
+def _init_db_async(func):
+    """Initialize the async database. Add backoff to the function call."""
+
+    @functools.wraps(func)
+    async def wrapper(*args, **kwargs):
+        if _SQLALCHEMY_ENGINE_ASYNC is None:
+            # this may happen multiple times since there is no locking
+            # here but thats fine, this is just a short circuit for the
+            # common case.
+            await context_utils.to_thread(initialize_and_get_db_async)
+
+        backoff = common_utils.Backoff(initial_backoff=1, max_backoff_factor=5)
+        last_exc = None
+        for _ in range(_DB_RETRY_TIMES):
+            try:
+                return await func(*args, **kwargs)
+            except (sqlalchemy_exc.OperationalError,
+                    asyncio.exceptions.TimeoutError, OSError,
+                    sqlalchemy_exc.TimeoutError, sqlite3.OperationalError,
+                    sqlalchemy_exc.InterfaceError, sqlite3.InterfaceError) as e:
+                last_exc = e
+                logger.debug(f'DB error: {last_exc}')
+                await asyncio.sleep(backoff.current_backoff())
+        assert last_exc is not None
+        raise last_exc
+
+    return wrapper
+
+
+def _init_db(func):
+    """Initialize the database. Add backoff to the function call."""
+
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        if _SQLALCHEMY_ENGINE is None:
+            # this may happen multiple times since there is no locking
+            # here but thats fine, this is just a short circuit for the
+            # common case.
+            initialize_and_get_db()
+
+        backoff = common_utils.Backoff(initial_backoff=1, max_backoff_factor=10)
+        last_exc = None
+        for _ in range(_DB_RETRY_TIMES):
+            try:
+                return func(*args, **kwargs)
+            except (sqlalchemy_exc.OperationalError,
+                    asyncio.exceptions.TimeoutError, OSError,
+                    sqlalchemy_exc.TimeoutError, sqlite3.OperationalError,
+                    sqlalchemy_exc.InterfaceError, sqlite3.InterfaceError) as e:
+                last_exc = e
+                logger.debug(f'DB error: {last_exc}')
+                time.sleep(backoff.current_backoff())
+        assert last_exc is not None
+        raise last_exc
+
+    return wrapper
+
+
+async def _describe_task_transition_failure(session: sql_async.AsyncSession,
+                                            job_id: int, task_id: int) -> str:
+    """Return a human-readable description when a task transition fails."""
+    details = 'Couldn\'t fetch the task details.'
+    try:
+        debug_result = await session.execute(
+            sqlalchemy.select(spot_table.c.status, spot_table.c.end_at).where(
+                sqlalchemy.and_(spot_table.c.spot_job_id == job_id,
+                                spot_table.c.task_id == task_id)))
+        rows = debug_result.mappings().all()
+        details = (f'{len(rows)} rows matched job {job_id} and task '
+                   f'{task_id}.')
+        for row in rows:
+            status = row['status']
+            end_at = row['end_at']
+            details += f' Status: {status}, End time: {end_at}.'
+    except Exception as exc:  # pylint: disable=broad-except
+        details += f' Error fetching task details: {exc}'
+    return details
 
-_DB_PATH = _get_db_path()
-db_utils.SQLiteConn(_DB_PATH, create_table)
 
 # job_duration is the time a job actually runs (including the
 # setup duration) before last_recover, excluding the provision
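Note: _init_db and _init_db_async lazily initialize the engine on first use and retry transient database errors with exponential backoff, so state accessors are wrapped by the decorator rather than touching the engine directly. A sketch of the intended usage pattern (this particular get_status helper is illustrative, not part of the hunk shown):

@_init_db
def get_status(job_id: int) -> Optional['ManagedJobStatus']:
    assert _SQLALCHEMY_ENGINE is not None
    with orm.Session(_SQLALCHEMY_ENGINE) as session:
        # Any transient OperationalError raised here is retried by the
        # decorator with backoff.
        status = session.execute(
            sqlalchemy.select(spot_table.c.status).where(
                spot_table.c.spot_job_id == job_id).limit(1)).scalar()
    return ManagedJobStatus(status) if status is not None else None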
@@ -164,33 +321,52 @@ db_utils.SQLiteConn(_DB_PATH, create_table)
 # e.g., via sky jobs queue. These may not correspond to actual
 # column names in the DB and it corresponds to the combined view
 # by joining the spot and job_info tables.
-columns = [
-    '_job_id',
-    '_task_name',
-    'resources',
-    'submitted_at',
-    'status',
-    'run_timestamp',
-    'start_at',
-    'end_at',
-    'last_recovered_at',
-    'recovery_count',
-    'job_duration',
-    'failure_reason',
-    'job_id',
-    'task_id',
-    'task_name',
-    'specs',
-    'local_log_file',
-    # columns from the job_info table
-    '_job_info_job_id',  # This should be the same as job_id
-    'job_name',
-    'schedule_state',
-    'controller_pid',
-    'dag_yaml_path',
-    'env_file_path',
-    'user_hash',
-]
+def _get_jobs_dict(r: 'row.RowMapping') -> Dict[str, Any]:
+    # WARNING: If you update these you may also need to update GetJobTable in
+    # the skylet ManagedJobsServiceImpl.
+    return {
+        '_job_id': r.get('job_id'),  # from spot table
+        '_task_name': r.get('job_name'),  # deprecated, from spot table
+        'resources': r.get('resources'),
+        'submitted_at': r.get('submitted_at'),
+        'status': r.get('status'),
+        'run_timestamp': r.get('run_timestamp'),
+        'start_at': r.get('start_at'),
+        'end_at': r.get('end_at'),
+        'last_recovered_at': r.get('last_recovered_at'),
+        'recovery_count': r.get('recovery_count'),
+        'job_duration': r.get('job_duration'),
+        'failure_reason': r.get('failure_reason'),
+        'job_id': r.get(spot_table.c.spot_job_id
+                       ),  # ambiguous, use table.column
+        'task_id': r.get('task_id'),
+        'task_name': r.get('task_name'),
+        'specs': r.get('specs'),
+        'local_log_file': r.get('local_log_file'),
+        'metadata': r.get('metadata'),
+        # columns from job_info table (some may be None for legacy jobs)
+        '_job_info_job_id': r.get(job_info_table.c.spot_job_id
+                                 ),  # ambiguous, use table.column
+        'job_name': r.get('name'),  # from job_info table
+        'schedule_state': r.get('schedule_state'),
+        'controller_pid': r.get('controller_pid'),
+        # the _path columns are for backwards compatibility, use the _content
+        # columns instead
+        'dag_yaml_path': r.get('dag_yaml_path'),
+        'env_file_path': r.get('env_file_path'),
+        'dag_yaml_content': r.get('dag_yaml_content'),
+        'env_file_content': r.get('env_file_content'),
+        'user_hash': r.get('user_hash'),
+        'workspace': r.get('workspace'),
+        'priority': r.get('priority'),
+        'entrypoint': r.get('entrypoint'),
+        'original_user_yaml_path': r.get('original_user_yaml_path'),
+        'original_user_yaml_content': r.get('original_user_yaml_content'),
+        'pool': r.get('pool'),
+        'current_cluster_name': r.get('current_cluster_name'),
+        'job_id_on_pool_cluster': r.get('job_id_on_pool_cluster'),
+        'pool_hash': r.get('pool_hash'),
+    }
 
 
 class ManagedJobStatus(enum.Enum):
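Note: _get_jobs_dict replaces the old positional columns list with named lookups on a RowMapping, so it expects rows produced by joining spot and job_info. A hypothetical sketch of the kind of query that would feed it, assuming the engine has been initialized:

# Outer-join each spot row to its job_info row, then convert every mapping.
query = sqlalchemy.select(spot_table, job_info_table).select_from(
    spot_table.outerjoin(
        job_info_table,
        spot_table.c.spot_job_id == job_info_table.c.spot_job_id))
with orm.Session(_SQLALCHEMY_ENGINE) as session:
    jobs = [_get_jobs_dict(r) for r in session.execute(query).mappings().all()]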
@@ -206,7 +382,7 @@ class ManagedJobStatus(enum.Enum):
     reset to INIT or SETTING_UP multiple times (depending on the preemptions).
 
     However, a managed job only has one ManagedJobStatus on the jobs controller.
-    ManagedJobStatus = [PENDING, SUBMITTED, STARTING, RUNNING, ...]
+    ManagedJobStatus = [PENDING, STARTING, RUNNING, ...]
     Mapping from JobStatus to ManagedJobStatus:
     INIT -> STARTING/RECOVERING
     SETTING_UP -> RUNNING
@@ -226,10 +402,14 @@ class ManagedJobStatus(enum.Enum):
     # PENDING: Waiting for the jobs controller to have a slot to run the
     # controller process.
     PENDING = 'PENDING'
+    # SUBMITTED: This state used to be briefly set before immediately changing
+    # to STARTING. Its use was removed in #5682. We keep it for backwards
+    # compatibility, so we can still parse old jobs databases that may have jobs
+    # in this state.
+    # TODO(cooperc): remove this in v0.12.0
+    DEPRECATED_SUBMITTED = 'SUBMITTED'
     # The submitted_at timestamp of the managed job in the 'spot' table will be
     # set to the time when the job controller begins running.
-    # SUBMITTED: The jobs controller starts the controller process.
-    SUBMITTED = 'SUBMITTED'
     # STARTING: The controller process is launching the cluster for the managed
     # job.
     STARTING = 'STARTING'
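Note: keeping DEPRECATED_SUBMITTED with the old string value means rows written by older controllers still deserialize, because Python enum lookup is by value:

# A row persisted as 'SUBMITTED' by an old controller still resolves:
assert ManagedJobStatus('SUBMITTED') is ManagedJobStatus.DEPRECATED_SUBMITTED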
@@ -302,10 +482,88 @@ class ManagedJobStatus(enum.Enum):
             cls.FAILED_NO_RESOURCE, cls.FAILED_CONTROLLER
         ]
 
+    @classmethod
+    def processing_statuses(cls) -> List['ManagedJobStatus']:
+        # Any status that is not terminal and is not CANCELLING.
+        return [
+            cls.PENDING,
+            cls.STARTING,
+            cls.RUNNING,
+            cls.RECOVERING,
+        ]
+
+    @classmethod
+    def from_protobuf(
+        cls, protobuf_value: 'managed_jobsv1_pb2.ManagedJobStatus'
+    ) -> Optional['ManagedJobStatus']:
+        """Convert protobuf ManagedJobStatus enum to Python enum value."""
+        protobuf_to_enum = {
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_UNSPECIFIED: None,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_PENDING: cls.PENDING,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_SUBMITTED:
+                cls.DEPRECATED_SUBMITTED,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_STARTING: cls.STARTING,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_RUNNING: cls.RUNNING,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_SUCCEEDED: cls.SUCCEEDED,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED: cls.FAILED,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED_CONTROLLER:
+                cls.FAILED_CONTROLLER,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED_SETUP:
+                cls.FAILED_SETUP,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_CANCELLED: cls.CANCELLED,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_RECOVERING: cls.RECOVERING,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_CANCELLING: cls.CANCELLING,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED_PRECHECKS:
+                cls.FAILED_PRECHECKS,
+            managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED_NO_RESOURCE:
+                cls.FAILED_NO_RESOURCE,
+        }
+
+        if protobuf_value not in protobuf_to_enum:
+            raise ValueError(
+                f'Unknown protobuf ManagedJobStatus value: {protobuf_value}')
+
+        return protobuf_to_enum[protobuf_value]
+
+    def to_protobuf(self) -> 'managed_jobsv1_pb2.ManagedJobStatus':
+        """Convert this Python enum value to protobuf enum value."""
+        enum_to_protobuf = {
+            ManagedJobStatus.PENDING:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_PENDING,
+            ManagedJobStatus.DEPRECATED_SUBMITTED:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_SUBMITTED,
+            ManagedJobStatus.STARTING:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_STARTING,
+            ManagedJobStatus.RUNNING:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_RUNNING,
+            ManagedJobStatus.SUCCEEDED:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_SUCCEEDED,
+            ManagedJobStatus.FAILED:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED,
+            ManagedJobStatus.FAILED_CONTROLLER:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED_CONTROLLER,
+            ManagedJobStatus.FAILED_SETUP:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED_SETUP,
+            ManagedJobStatus.CANCELLED:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_CANCELLED,
+            ManagedJobStatus.RECOVERING:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_RECOVERING,
+            ManagedJobStatus.CANCELLING:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_CANCELLING,
+            ManagedJobStatus.FAILED_PRECHECKS:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED_PRECHECKS,
+            ManagedJobStatus.FAILED_NO_RESOURCE:
+                managed_jobsv1_pb2.MANAGED_JOB_STATUS_FAILED_NO_RESOURCE,
+        }
+
+        if self not in enum_to_protobuf:
+            raise ValueError(f'Unknown ManagedJobStatus value: {self}')
+
+        return enum_to_protobuf[self]
+
 
 _SPOT_STATUS_TO_COLOR = {
     ManagedJobStatus.PENDING: colorama.Fore.BLUE,
-    ManagedJobStatus.SUBMITTED: colorama.Fore.BLUE,
     ManagedJobStatus.STARTING: colorama.Fore.BLUE,
     ManagedJobStatus.RUNNING: colorama.Fore.GREEN,
     ManagedJobStatus.RECOVERING: colorama.Fore.CYAN,
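Note: the two mapping tables are intended to be inverses for every concrete member (UNSPECIFIED maps to None and deliberately has no reverse entry), which can be checked with a quick round trip:

for status in ManagedJobStatus:
    assert ManagedJobStatus.from_protobuf(status.to_protobuf()) is status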
@@ -317,6 +575,8 @@ _SPOT_STATUS_TO_COLOR = {
     ManagedJobStatus.FAILED_CONTROLLER: colorama.Fore.RED,
     ManagedJobStatus.CANCELLING: colorama.Fore.YELLOW,
     ManagedJobStatus.CANCELLED: colorama.Fore.YELLOW,
+    # TODO(cooperc): backwards compatibility, remove this in v0.12.0
+    ManagedJobStatus.DEPRECATED_SUBMITTED: colorama.Fore.BLUE,
 }
 
 
@@ -333,8 +593,12 @@ class ManagedJobScheduleState(enum.Enum):
     - LAUNCHING -> ALIVE: The launch attempt was completed. It may have
       succeeded or failed. The job controller is not allowed to sky.launch again
       without transitioning to ALIVE_WAITING and then LAUNCHING.
+    - LAUNCHING -> ALIVE_BACKOFF: The launch failed to find resources, and is
+      in backoff waiting for resources.
     - ALIVE -> ALIVE_WAITING: The job controller wants to sky.launch again,
       either for recovery or to launch a subsequent task.
+    - ALIVE_BACKOFF -> ALIVE_WAITING: The backoff period has ended, and the job
+      controller wants to try to launch again.
     - ALIVE_WAITING -> LAUNCHING: The scheduler has determined that the job
       controller may launch again.
     - LAUNCHING, ALIVE, or ALIVE_WAITING -> DONE: The job controller is exiting
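Note: the docstring above specifies a small state machine. Purely as an illustration (this table is not in the source), the transitions spelled out in this hunk could be written down explicitly:

# Hypothetical transition table for the edges described in the docstring.
_SCHEDULE_TRANSITIONS = {
    ManagedJobScheduleState.LAUNCHING: {
        ManagedJobScheduleState.ALIVE,
        ManagedJobScheduleState.ALIVE_BACKOFF,
        ManagedJobScheduleState.DONE,
    },
    ManagedJobScheduleState.ALIVE: {
        ManagedJobScheduleState.ALIVE_WAITING,
        ManagedJobScheduleState.DONE,
    },
    ManagedJobScheduleState.ALIVE_BACKOFF: {
        ManagedJobScheduleState.ALIVE_WAITING,
    },
    ManagedJobScheduleState.ALIVE_WAITING: {
        ManagedJobScheduleState.LAUNCHING,
        ManagedJobScheduleState.DONE,
    },
}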
@@ -348,6 +612,7 @@ class ManagedJobScheduleState(enum.Enum):
       state or vice versa. (In fact, schedule state is defined on the job and
       status on the task.)
     - INACTIVE or WAITING should only be seen when a job is PENDING.
+    - ALIVE_BACKOFF should only be seen when a job is STARTING.
     - ALIVE_WAITING should only be seen when a job is RECOVERING, has multiple
       tasks, or needs to retry launching.
     - LAUNCHING and ALIVE can be seen in many different statuses.
@@ -359,6 +624,10 @@ class ManagedJobScheduleState(enum.Enum):
     # This job may have been created before scheduler was introduced in #4458.
     # This state is not used by scheduler but just for backward compatibility.
     # TODO(cooperc): remove this in v0.11.0
+    # TODO(luca): the only states we need are INACTIVE, WAITING, ALIVE, and
+    # DONE. ALIVE = old LAUNCHING + ALIVE + ALIVE_BACKOFF + ALIVE_WAITING and
+    # will represent jobs that are claimed by a controller. Delete the rest
+    # in v0.13.0
     INVALID = None
     # The job should be ignored by the scheduler.
     INACTIVE = 'INACTIVE'
@@ -373,194 +642,205 @@ class ManagedJobScheduleState(enum.Enum):
  # The job is running sky.launch, or soon will, using a limited number of
  # allowed launch slots.
  LAUNCHING = 'LAUNCHING'
+ # The job is alive, but is in backoff waiting for resources - a special case
+ # of ALIVE.
+ ALIVE_BACKOFF = 'ALIVE_BACKOFF'
  # The controller for the job is running, but it's not currently launching.
  ALIVE = 'ALIVE'
  # The job is in a terminal state. (Not necessarily SUCCEEDED.)
  DONE = 'DONE'

+ @classmethod
+ def from_protobuf(
+ cls, protobuf_value: 'managed_jobsv1_pb2.ManagedJobScheduleState'
+ ) -> Optional['ManagedJobScheduleState']:
+ """Convert protobuf ManagedJobScheduleState enum to Python enum value.
+ """
+ protobuf_to_enum = {
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_UNSPECIFIED: None,
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_INVALID: cls.INVALID,
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_INACTIVE:
+ cls.INACTIVE,
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_WAITING: cls.WAITING,
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_ALIVE_WAITING:
+ cls.ALIVE_WAITING,
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_LAUNCHING:
+ cls.LAUNCHING,
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_ALIVE_BACKOFF:
+ cls.ALIVE_BACKOFF,
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_ALIVE: cls.ALIVE,
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_DONE: cls.DONE,
+ }
+
+ if protobuf_value not in protobuf_to_enum:
+ raise ValueError('Unknown protobuf ManagedJobScheduleState value: '
+ f'{protobuf_value}')
+
+ return protobuf_to_enum[protobuf_value]
+
+ def to_protobuf(self) -> 'managed_jobsv1_pb2.ManagedJobScheduleState':
+ """Convert this Python enum value to protobuf enum value."""
+ enum_to_protobuf = {
+ ManagedJobScheduleState.INVALID:
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_INVALID,
+ ManagedJobScheduleState.INACTIVE:
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_INACTIVE,
+ ManagedJobScheduleState.WAITING:
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_WAITING,
+ ManagedJobScheduleState.ALIVE_WAITING:
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_ALIVE_WAITING,
+ ManagedJobScheduleState.LAUNCHING:
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_LAUNCHING,
+ ManagedJobScheduleState.ALIVE_BACKOFF:
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_ALIVE_BACKOFF,
+ ManagedJobScheduleState.ALIVE:
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_ALIVE,
+ ManagedJobScheduleState.DONE:
+ managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_DONE,
+ }
+
+ if self not in enum_to_protobuf:
+ raise ValueError(f'Unknown ManagedJobScheduleState value: {self}')
+
+ return enum_to_protobuf[self]
+
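Editor's note: the two new converters are inverses of each other on every defined state; only UNSPECIFIED is non-invertible. A minimal round-trip sketch, assuming the generated `managed_jobsv1_pb2` module is importable (its import path is not shown in this hunk):

    # Round-trip sketch; import path of the generated module is assumed.
    state = ManagedJobScheduleState.ALIVE_BACKOFF
    assert ManagedJobScheduleState.from_protobuf(state.to_protobuf()) is state
    # UNSPECIFIED is the one non-invertible case: it maps to None.
    assert ManagedJobScheduleState.from_protobuf(
        managed_jobsv1_pb2.MANAGED_JOB_SCHEDULE_STATE_UNSPECIFIED) is None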
 
  # === Status transition functions ===
- def set_job_info(job_id: int, name: str):
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- cursor.execute(
- """\
- INSERT INTO job_info
- (spot_job_id, name, schedule_state)
- VALUES (?, ?, ?)""",
- (job_id, name, ManagedJobScheduleState.INACTIVE.value))
+ @_init_db
+ def set_job_info_without_job_id(name: str, workspace: str, entrypoint: str,
+ pool: Optional[str], pool_hash: Optional[str],
+ user_hash: Optional[str]) -> int:
+ assert _SQLALCHEMY_ENGINE is not None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ if (_SQLALCHEMY_ENGINE.dialect.name ==
+ db_utils.SQLAlchemyDialect.SQLITE.value):
+ insert_func = sqlite.insert
+ elif (_SQLALCHEMY_ENGINE.dialect.name ==
+ db_utils.SQLAlchemyDialect.POSTGRESQL.value):
+ insert_func = postgresql.insert
+ else:
+ raise ValueError('Unsupported database dialect')
+
+ insert_stmt = insert_func(job_info_table).values(
+ name=name,
+ schedule_state=ManagedJobScheduleState.INACTIVE.value,
+ workspace=workspace,
+ entrypoint=entrypoint,
+ pool=pool,
+ pool_hash=pool_hash,
+ user_hash=user_hash,
+ )

+ if (_SQLALCHEMY_ENGINE.dialect.name ==
+ db_utils.SQLAlchemyDialect.SQLITE.value):
+ result = session.execute(insert_stmt)
+ ret = result.lastrowid
+ session.commit()
+ return ret
+ elif (_SQLALCHEMY_ENGINE.dialect.name ==
+ db_utils.SQLAlchemyDialect.POSTGRESQL.value):
+ result = session.execute(
+ insert_stmt.returning(job_info_table.c.spot_job_id))
+ ret = result.scalar()
+ session.commit()
+ return ret
+ else:
+ raise ValueError('Unsupported database dialect')
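Editor's note: the SQLite and PostgreSQL branches above differ only in how the autogenerated primary key is read back (`lastrowid` on the cursor result vs. a `RETURNING` clause). A self-contained sketch of the same pattern on a toy table (illustrative names, not the real schema):

    import sqlalchemy
    from sqlalchemy.dialects import postgresql, sqlite

    metadata = sqlalchemy.MetaData()
    # Toy stand-in for job_info_table.
    jobs = sqlalchemy.Table(
        'jobs', metadata,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('name', sqlalchemy.Text))

    def insert_and_get_id(engine, name):
        dialect = engine.dialect.name
        stmt = (sqlite.insert(jobs) if dialect == 'sqlite' else
                postgresql.insert(jobs)).values(name=name)
        with engine.begin() as conn:
            if dialect == 'sqlite':
                # SQLite exposes the new rowid on the cursor result.
                return conn.execute(stmt).lastrowid
            # PostgreSQL needs RETURNING to hand back the generated key.
            return conn.execute(stmt.returning(jobs.c.id)).scalar()

    engine = sqlalchemy.create_engine('sqlite://')
    metadata.create_all(engine)
    assert insert_and_get_id(engine, 'demo') == 1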
 
- def set_pending(job_id: int, task_id: int, task_name: str, resources_str: str):
- """Set the task to pending state."""
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- cursor.execute(
- """\
- INSERT INTO spot
- (spot_job_id, task_id, task_name, resources, status)
- VALUES (?, ?, ?, ?, ?)""",
- (job_id, task_id, task_name, resources_str,
- ManagedJobStatus.PENDING.value))
-
-
- def set_submitted(job_id: int, task_id: int, run_timestamp: str,
- submit_time: float, resources_str: str,
- specs: Dict[str, Union[str,
- int]], callback_func: CallbackType):
- """Set the task to submitted.

- Args:
- job_id: The managed job ID.
- task_id: The task ID.
- run_timestamp: The run_timestamp of the run. This will be used to
- determine the log directory of the managed task.
- submit_time: The time when the managed task is submitted.
- resources_str: The resources string of the managed task.
- specs: The specs of the managed task.
- callback_func: The callback function.
+ @_init_db
+ def set_pending(
+ job_id: int,
+ task_id: int,
+ task_name: str,
+ resources_str: str,
+ metadata: str,
+ ):
+ """Set the task to pending state."""
+ assert _SQLALCHEMY_ENGINE is not None
+
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ session.execute(
+ sqlalchemy.insert(spot_table).values(
+ spot_job_id=job_id,
+ task_id=task_id,
+ task_name=task_name,
+ resources=resources_str,
+ metadata=metadata,
+ status=ManagedJobStatus.PENDING.value,
+ ))
+ session.commit()
+
+
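Editor's note: every rewritten helper here is wrapped in `_init_db` / `_init_db_async`, whose definitions fall outside this hunk. Judging from the `assert _SQLALCHEMY_ENGINE is not None` lines that follow each decorator, they lazily construct the module-level engines before the wrapped body runs. A plausible shape, offered purely as an assumption:

    import functools

    def _init_db(func):
        """Assumed sketch: create the global engine on first use."""

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            global _SQLALCHEMY_ENGINE
            if _SQLALCHEMY_ENGINE is None:
                # `_create_engine` is hypothetical; the real initializer
                # lives elsewhere in this module.
                _SQLALCHEMY_ENGINE = _create_engine()
            return func(*args, **kwargs)

        return wrapper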
+ @_init_db_async
+ async def set_backoff_pending_async(job_id: int, task_id: int):
+ """Set the task to PENDING state if it is in backoff.
+
+ This should only be used to transition from STARTING or RECOVERING back to
+ PENDING.
  """
- # Use the timestamp in the `run_timestamp` ('sky-2022-10...'), to make
- # the log directory and submission time align with each other, so as to
- # make it easier to find them based on one of the values.
- # Also, using the earlier timestamp should be closer to the term
- # `submit_at`, which represents the time the managed task is submitted.
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- cursor.execute(
- """\
- UPDATE spot SET
- resources=(?),
- submitted_at=(?),
- status=(?),
- run_timestamp=(?),
- specs=(?)
- WHERE spot_job_id=(?) AND
- task_id=(?) AND
- status=(?) AND
- end_at IS null""",
- (resources_str, submit_time, ManagedJobStatus.SUBMITTED.value,
- run_timestamp, json.dumps(specs), job_id, task_id,
- ManagedJobStatus.PENDING.value))
- if cursor.rowcount != 1:
- raise exceptions.ManagedJobStatusError(
- f'Failed to set the task to submitted. '
- f'({cursor.rowcount} rows updated)')
- callback_func('SUBMITTED')
-
-
- def set_starting(job_id: int, task_id: int, callback_func: CallbackType):
- """Set the task to starting state."""
- logger.info('Launching the spot cluster...')
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- cursor.execute(
- """\
- UPDATE spot SET status=(?)
- WHERE spot_job_id=(?) AND
- task_id=(?) AND
- status=(?) AND
- end_at IS null""", (ManagedJobStatus.STARTING.value, job_id,
- task_id, ManagedJobStatus.SUBMITTED.value))
- if cursor.rowcount != 1:
- raise exceptions.ManagedJobStatusError(
- f'Failed to set the task to starting. '
- f'({cursor.rowcount} rows updated)')
- callback_func('STARTING')
-
-
- def set_started(job_id: int, task_id: int, start_time: float,
- callback_func: CallbackType):
- """Set the task to started state."""
- logger.info('Job started.')
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- cursor.execute(
- """\
- UPDATE spot SET status=(?), start_at=(?), last_recovered_at=(?)
- WHERE spot_job_id=(?) AND
- task_id=(?) AND
- status IN (?, ?) AND
- end_at IS null""",
- (
- ManagedJobStatus.RUNNING.value,
- start_time,
- start_time,
- job_id,
- task_id,
- ManagedJobStatus.STARTING.value,
- # If the task is empty, we will jump straight from PENDING to
- # RUNNING
- ManagedJobStatus.PENDING.value,
- ),
+ assert _SQLALCHEMY_ENGINE_ASYNC is not None
+ async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+ result = await session.execute(
+ sqlalchemy.update(spot_table).where(
+ sqlalchemy.and_(
+ spot_table.c.spot_job_id == job_id,
+ spot_table.c.task_id == task_id,
+ spot_table.c.status.in_([
+ ManagedJobStatus.STARTING.value,
+ ManagedJobStatus.RECOVERING.value
+ ]),
+ spot_table.c.end_at.is_(None),
+ )).values({spot_table.c.status: ManagedJobStatus.PENDING.value})
  )
- if cursor.rowcount != 1:
- raise exceptions.ManagedJobStatusError(
- f'Failed to set the task to started. '
- f'({cursor.rowcount} rows updated)')
- callback_func('STARTED')
-
-
- def set_recovering(job_id: int, task_id: int, callback_func: CallbackType):
- """Set the task to recovering state, and update the job duration."""
- logger.info('=== Recovering... ===')
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- cursor.execute(
- """\
- UPDATE spot SET
- status=(?), job_duration=job_duration+(?)-last_recovered_at
- WHERE spot_job_id=(?) AND
- task_id=(?) AND
- status=(?) AND
- end_at IS null""",
- (ManagedJobStatus.RECOVERING.value, time.time(), job_id, task_id,
- ManagedJobStatus.RUNNING.value))
- if cursor.rowcount != 1:
- raise exceptions.ManagedJobStatusError(
- f'Failed to set the task to recovering. '
- f'({cursor.rowcount} rows updated)')
- callback_func('RECOVERING')
-
-
- def set_recovered(job_id: int, task_id: int, recovered_time: float,
- callback_func: CallbackType):
- """Set the task to recovered."""
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- cursor.execute(
- """\
- UPDATE spot SET
- status=(?), last_recovered_at=(?), recovery_count=recovery_count+1
- WHERE spot_job_id=(?) AND
- task_id=(?) AND
- status=(?) AND
- end_at IS null""",
- (ManagedJobStatus.RUNNING.value, recovered_time, job_id, task_id,
- ManagedJobStatus.RECOVERING.value))
- if cursor.rowcount != 1:
- raise exceptions.ManagedJobStatusError(
- f'Failed to set the task to recovered. '
- f'({cursor.rowcount} rows updated)')
- logger.info('==== Recovered. ====')
- callback_func('RECOVERED')
-
-
- def set_succeeded(job_id: int, task_id: int, end_time: float,
- callback_func: CallbackType):
- """Set the task to succeeded, if it is in a non-terminal state."""
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- cursor.execute(
- """\
- UPDATE spot SET
- status=(?), end_at=(?)
- WHERE spot_job_id=(?) AND
- task_id=(?) AND
- status=(?) AND
- end_at IS null""",
- (ManagedJobStatus.SUCCEEDED.value, end_time, job_id, task_id,
- ManagedJobStatus.RUNNING.value))
- if cursor.rowcount != 1:
- raise exceptions.ManagedJobStatusError(
- f'Failed to set the task to succeeded. '
- f'({cursor.rowcount} rows updated)')
- callback_func('SUCCEEDED')
- logger.info('Job succeeded.')
-
-
+ count = result.rowcount
+ await session.commit()
+ if count != 1:
+ details = await _describe_task_transition_failure(
+ session, job_id, task_id)
+ message = ('Failed to set the task back to pending. '
+ f'({count} rows updated. {details})')
+ logger.error(message)
+ raise exceptions.ManagedJobStatusError(message)
+ # Do not call callback_func here, as we don't use the callback for PENDING.
+
+
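Editor's note: the allowed source states are encoded in the WHERE clause itself, so a racing writer loses by updating zero rows rather than by clobbering another transition. The same compare-and-swap idiom in isolation (placeholder table and a synchronous session for brevity):

    import sqlalchemy
    from sqlalchemy import orm

    def try_transition(engine, table, job_id, from_statuses, to_status):
        # True iff this caller won the race: exactly one row was still in
        # an expected source state and not yet terminal.
        with orm.Session(engine) as session:
            result = session.execute(
                sqlalchemy.update(table).where(
                    sqlalchemy.and_(
                        table.c.spot_job_id == job_id,
                        table.c.status.in_(from_statuses),
                        table.c.end_at.is_(None),
                    )).values({table.c.status: to_status}))
            session.commit()
            return result.rowcount == 1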
+ @_init_db
+ async def set_restarting_async(job_id: int, task_id: int, recovering: bool):
+ """Set the task back to STARTING or RECOVERING from PENDING.
+
+ This should not be used for the initial transition from PENDING to STARTING.
+ In that case, use set_starting instead. This function should only be used
+ after using set_backoff_pending to transition back to PENDING during
+ launch retry backoff.
+ """
+ assert _SQLALCHEMY_ENGINE_ASYNC is not None
+ target_status = ManagedJobStatus.STARTING.value
+ if recovering:
+ target_status = ManagedJobStatus.RECOVERING.value
+ async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+ result = await session.execute(
+ sqlalchemy.update(spot_table).where(
+ sqlalchemy.and_(
+ spot_table.c.spot_job_id == job_id,
+ spot_table.c.task_id == task_id,
+ spot_table.c.end_at.is_(None),
+ )).values({spot_table.c.status: target_status}))
+ count = result.rowcount
+ await session.commit()
+ logger.debug(f'back to {target_status}')
+ if count != 1:
+ details = await _describe_task_transition_failure(
+ session, job_id, task_id)
+ message = (f'Failed to set the task back to {target_status}. '
+ f'({count} rows updated. {details})')
+ logger.error(message)
+ raise exceptions.ManagedJobStatusError(message)
+ # Do not call callback_func here, as it should only be invoked for the
+ # initial (pre-`set_backoff_pending`) transition to STARTING or RECOVERING.
+
+
+ @_init_db
  def set_failed(
  job_id: int,
  task_id: Optional[int],
@@ -585,188 +865,158 @@ def set_failed(
  override_terminal: If True, override the current status even if end_at
  is already set.
  """
+ assert _SQLALCHEMY_ENGINE is not None
  assert failure_type.is_failed(), failure_type
  end_time = time.time() if end_time is None else end_time

  fields_to_set: Dict[str, Any] = {
- 'status': failure_type.value,
- 'failure_reason': failure_reason,
+ spot_table.c.status: failure_type.value,
+ spot_table.c.failure_reason: failure_reason,
  }
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- previous_status = cursor.execute(
- 'SELECT status FROM spot WHERE spot_job_id=(?)',
- (job_id,)).fetchone()[0]
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ # Get previous status
+ previous_status = session.execute(
+ sqlalchemy.select(spot_table.c.status).where(
+ spot_table.c.spot_job_id == job_id)).fetchone()[0]
  previous_status = ManagedJobStatus(previous_status)
  if previous_status == ManagedJobStatus.RECOVERING:
  # If the job is recovering, we should set the last_recovered_at to
  # the end_time, so that the end_at - last_recovered_at will not
  # affect the job duration calculation.
- fields_to_set['last_recovered_at'] = end_time
- set_str = ', '.join(f'{k}=(?)' for k in fields_to_set)
- task_query_str = '' if task_id is None else 'AND task_id=(?)'
- task_value = [] if task_id is None else [
- task_id,
- ]
+ fields_to_set[spot_table.c.last_recovered_at] = end_time
+ where_conditions = [spot_table.c.spot_job_id == job_id]
+ if task_id is not None:
+ where_conditions.append(spot_table.c.task_id == task_id)

+ # Handle failure_reason prepending when override_terminal is True
  if override_terminal:
+ # Get existing failure_reason with row lock to prevent race
+ # conditions
+ existing_reason_result = session.execute(
+ sqlalchemy.select(spot_table.c.failure_reason).where(
+ sqlalchemy.and_(*where_conditions)).with_for_update())
+ existing_reason_row = existing_reason_result.fetchone()
+ if existing_reason_row and existing_reason_row[0]:
+ # Prepend new failure reason to existing one
+ fields_to_set[spot_table.c.failure_reason] = (
+ failure_reason + '. Previously: ' + existing_reason_row[0])
  # Use COALESCE for end_at to avoid overriding the existing end_at if
  # it's already set.
- cursor.execute(
- f"""\
- UPDATE spot SET
- end_at = COALESCE(end_at, ?),
- {set_str}
- WHERE spot_job_id=(?) {task_query_str}""",
- (end_time, *list(fields_to_set.values()), job_id, *task_value))
+ fields_to_set[spot_table.c.end_at] = sqlalchemy.func.coalesce(
+ spot_table.c.end_at, end_time)
  else:
- # Only set if end_at is null, i.e. the previous status is not
- # terminal.
- cursor.execute(
- f"""\
- UPDATE spot SET
- end_at = (?),
- {set_str}
- WHERE spot_job_id=(?) {task_query_str} AND end_at IS null""",
- (end_time, *list(fields_to_set.values()), job_id, *task_value))
-
- updated = cursor.rowcount > 0
+ fields_to_set[spot_table.c.end_at] = end_time
+ where_conditions.append(spot_table.c.end_at.is_(None))
+ count = session.query(spot_table).filter(
+ sqlalchemy.and_(*where_conditions)).update(fields_to_set)
+ session.commit()
+ updated = count > 0
  if callback_func and updated:
  callback_func('FAILED')
  logger.info(failure_reason)
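Editor's note: the `override_terminal` branch leans on `COALESCE` so that a job which already finished keeps its original `end_at`. A runnable miniature of just that trick (toy table):

    import sqlalchemy

    metadata = sqlalchemy.MetaData()
    t = sqlalchemy.Table(
        'spot_demo', metadata,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('end_at', sqlalchemy.Float))
    engine = sqlalchemy.create_engine('sqlite://')
    metadata.create_all(engine)
    with engine.begin() as conn:
        conn.execute(sqlalchemy.insert(t), [{'id': 1, 'end_at': 100.0},
                                            {'id': 2, 'end_at': None}])
        # Keep an existing end_at; only stamp rows that have none.
        conn.execute(sqlalchemy.update(t).values(
            end_at=sqlalchemy.func.coalesce(t.c.end_at, 200.0)))
        rows = conn.execute(
            sqlalchemy.select(t.c.end_at).order_by(t.c.id)).scalars().all()
    assert rows == [100.0, 200.0]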
 

- def set_cancelling(job_id: int, callback_func: CallbackType):
- """Set tasks in the job as cancelling, if they are in non-terminal states.
-
- task_id is not needed, because we expect the job should be cancelled
- as a whole, and we should not cancel a single task.
- """
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- rows = cursor.execute(
- """\
- UPDATE spot SET
- status=(?)
- WHERE spot_job_id=(?) AND end_at IS null""",
- (ManagedJobStatus.CANCELLING.value, job_id))
- updated = rows.rowcount > 0
- if updated:
- logger.info('Cancelling the job...')
- callback_func('CANCELLING')
- else:
- logger.info('Cancellation skipped, job is already terminal')
-
+ @_init_db
+ def set_pending_cancelled(job_id: int):
+ """Set the job as pending cancelled, if it is in non-terminal states."""
+ assert _SQLALCHEMY_ENGINE is not None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ # Subquery to get the spot_job_ids that match the joined condition
+ subquery = session.query(spot_table.c.job_id).join(
+ job_info_table,
+ spot_table.c.spot_job_id == job_info_table.c.spot_job_id
+ ).filter(
+ spot_table.c.spot_job_id == job_id,
+ spot_table.c.status == ManagedJobStatus.PENDING.value,
+ # Note: it's possible that a WAITING job actually needs to be
+ # cleaned up, if we are in the middle of an upgrade/recovery and
+ # the job is waiting to be reclaimed by a new controller. But,
+ # in this case the status will not be PENDING.
+ sqlalchemy.or_(
+ job_info_table.c.schedule_state ==
+ ManagedJobScheduleState.WAITING.value,
+ job_info_table.c.schedule_state ==
+ ManagedJobScheduleState.INACTIVE.value,
+ ),
+ ).subquery()

- def set_cancelled(job_id: int, callback_func: CallbackType):
- """Set tasks in the job as cancelled, if they are in CANCELLING state.
-
- The set_cancelling should be called before this function.
- """
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- rows = cursor.execute(
- """\
- UPDATE spot SET
- status=(?), end_at=(?)
- WHERE spot_job_id=(?) AND status=(?)""",
- (ManagedJobStatus.CANCELLED.value, time.time(), job_id,
- ManagedJobStatus.CANCELLING.value))
- updated = rows.rowcount > 0
- if updated:
- logger.info('Job cancelled.')
- callback_func('CANCELLED')
- else:
- logger.info('Cancellation skipped, job is not CANCELLING')
+ count = session.query(spot_table).filter(
+ spot_table.c.job_id.in_(subquery)).update(
+ {spot_table.c.status: ManagedJobStatus.CANCELLED.value},
+ synchronize_session=False)
+ session.commit()
+ return count > 0


+ @_init_db
  def set_local_log_file(job_id: int, task_id: Optional[int],
  local_log_file: str):
  """Set the local log file for a job."""
- filter_str = 'spot_job_id=(?)'
- filter_args = [local_log_file, job_id]
- if task_id is not None:
- filter_str += ' AND task_id=(?)'
- filter_args.append(task_id)
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- cursor.execute(
- 'UPDATE spot SET local_log_file=(?) '
- f'WHERE {filter_str}', filter_args)
+ assert _SQLALCHEMY_ENGINE is not None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ where_conditions = [spot_table.c.spot_job_id == job_id]
+ if task_id is not None:
+ where_conditions.append(spot_table.c.task_id == task_id)
+ session.query(spot_table).filter(
+ sqlalchemy.and_(*where_conditions)).update(
+ {spot_table.c.local_log_file: local_log_file})
+ session.commit()


  # ======== utility functions ========
+ @_init_db
  def get_nonterminal_job_ids_by_name(name: Optional[str],
+ user_hash: Optional[str] = None,
  all_users: bool = False) -> List[int]:
- """Get non-terminal job ids by name."""
- statuses = ', '.join(['?'] * len(ManagedJobStatus.terminal_statuses()))
- field_values = [
- status.value for status in ManagedJobStatus.terminal_statuses()
- ]
-
- job_filter = ''
- if name is None and not all_users:
- job_filter += 'AND (job_info.user_hash=(?)) '
- field_values.append(common_utils.get_user_hash())
- if name is not None:
- # We match the job name from `job_info` for the jobs submitted after
- # #1982, and from `spot` for the jobs submitted before #1982, whose
- # job_info is not available.
- job_filter += ('AND (job_info.name=(?) OR '
- '(job_info.name IS NULL AND spot.task_name=(?))) ')
- field_values.extend([name, name])
-
- # Left outer join is used here instead of join, because the job_info does
- # not contain the managed jobs submitted before #1982.
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- rows = cursor.execute(
- f"""\
- SELECT DISTINCT spot.spot_job_id
- FROM spot
- LEFT OUTER JOIN job_info
- ON spot.spot_job_id=job_info.spot_job_id
- WHERE status NOT IN
- ({statuses})
- {job_filter}
- ORDER BY spot.spot_job_id DESC""", field_values).fetchall()
- job_ids = [row[0] for row in rows if row[0] is not None]
- return job_ids
-
+ """Get non-terminal job ids by name.

- def get_schedule_live_jobs(job_id: Optional[int]) -> List[Dict[str, Any]]:
- """Get jobs from the database that have a live schedule_state.
-
- This should return job(s) that are not INACTIVE, WAITING, or DONE. So a
- returned job should correspond to a live job controller process, with one
- exception: the job may have just transitioned from WAITING to LAUNCHING, but
- the controller process has not yet started.
+ If name is None:
+ 1. if all_users is False, get for the given user_hash
+ 2. otherwise, get for all users
  """
- job_filter = '' if job_id is None else 'AND spot_job_id=(?)'
- job_value = (job_id,) if job_id is not None else ()
-
- # Join spot and job_info tables to get the job name for each task.
- # We use LEFT OUTER JOIN mainly for backward compatibility, as for an
- # existing controller before #1982, the job_info table may not exist,
- # and all the managed jobs created before will not present in the
- # job_info.
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- rows = cursor.execute(
- f"""\
- SELECT spot_job_id, schedule_state, controller_pid
- FROM job_info
- WHERE schedule_state not in (?, ?, ?)
- {job_filter}
- ORDER BY spot_job_id DESC""",
- (ManagedJobScheduleState.INACTIVE.value,
- ManagedJobScheduleState.WAITING.value,
- ManagedJobScheduleState.DONE.value, *job_value)).fetchall()
- jobs = []
- for row in rows:
- job_dict = {
- 'job_id': row[0],
- 'schedule_state': ManagedJobScheduleState(row[1]),
- 'controller_pid': row[2],
- }
- jobs.append(job_dict)
- return jobs
+ assert _SQLALCHEMY_ENGINE is not None
+
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ # Build the query using SQLAlchemy core
+ query = sqlalchemy.select(
+ spot_table.c.spot_job_id.distinct()).select_from(
+ spot_table.outerjoin(
+ job_info_table,
+ spot_table.c.spot_job_id == job_info_table.c.spot_job_id,
+ ))
+ where_conditions = [
+ ~spot_table.c.status.in_([
+ status.value for status in ManagedJobStatus.terminal_statuses()
+ ])
+ ]
+ if name is None and not all_users:
+ if user_hash is None:
+ # For backwards compatibility. With codegen, USER_ID_ENV_VAR
+ # was set to the correct value by the jobs controller, as
+ # part of ManagedJobCodeGen._build(). This is no longer the
+ # case for the Skylet gRPC server, which is why we need to
+ # pass it explicitly through the request body.
+ logger.debug('user_hash is None, using current user hash')
+ user_hash = common_utils.get_user_hash()
+ where_conditions.append(job_info_table.c.user_hash == user_hash)
+ if name is not None:
+ # We match the job name from `job_info` for the jobs submitted after
+ # #1982, and from `spot` for the jobs submitted before #1982, whose
+ # job_info is not available.
+ where_conditions.append(
+ sqlalchemy.or_(
+ job_info_table.c.name == name,
+ sqlalchemy.and_(job_info_table.c.name.is_(None),
+ spot_table.c.task_name == name),
+ ))
+ query = query.where(sqlalchemy.and_(*where_conditions)).order_by(
+ spot_table.c.spot_job_id.desc())
+ rows = session.execute(query).fetchall()
+ job_ids = [row[0] for row in rows if row[0] is not None]
+ return job_ids
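Editor's note: the outer join plus the name fallback reproduces the removed raw-SQL filter exactly (pre-#1982 jobs have no `job_info` row, so they are matched via `spot.task_name`). A toy sketch that prints the generated SQL for comparison:

    import sqlalchemy

    metadata = sqlalchemy.MetaData()
    spot = sqlalchemy.Table(
        'spot', metadata,
        sqlalchemy.Column('spot_job_id', sqlalchemy.Integer),
        sqlalchemy.Column('task_name', sqlalchemy.Text))
    job_info = sqlalchemy.Table(
        'job_info', metadata,
        sqlalchemy.Column('spot_job_id', sqlalchemy.Integer),
        sqlalchemy.Column('name', sqlalchemy.Text))

    name = 'train'  # hypothetical job name
    query = (sqlalchemy.select(spot.c.spot_job_id.distinct())
             .select_from(spot.outerjoin(
                 job_info, spot.c.spot_job_id == job_info.c.spot_job_id))
             .where(sqlalchemy.or_(
                 job_info.c.name == name,
                 sqlalchemy.and_(job_info.c.name.is_(None),
                                 spot.c.task_name == name))))
    # Emits: SELECT DISTINCT spot.spot_job_id FROM spot
    #        LEFT OUTER JOIN job_info ON ... WHERE name = :name OR (...)
    print(query)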
 

+ @_init_db
  def get_jobs_to_check_status(job_id: Optional[int] = None) -> List[int]:
  """Get jobs that need controller process checking.

@@ -778,89 +1028,87 @@ def get_jobs_to_check_status(job_id: Optional[int] = None) -> List[int]:
  - Jobs have schedule_state DONE but are in a non-terminal status
  - Legacy jobs (that is, no schedule state) that are in non-terminal status
  """
- job_filter = '' if job_id is None else 'AND spot.spot_job_id=(?)'
- job_value = () if job_id is None else (job_id,)
-
- status_filter_str = ', '.join(['?'] *
- len(ManagedJobStatus.terminal_statuses()))
- terminal_status_values = [
- status.value for status in ManagedJobStatus.terminal_statuses()
- ]
-
- # Get jobs that are either:
- # 1. Have schedule state that is not DONE, or
- # 2. Have schedule state DONE AND are in non-terminal status (unexpected
- # inconsistent state), or
- # 3. Have no schedule state (legacy) AND are in non-terminal status
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- rows = cursor.execute(
- f"""\
- SELECT DISTINCT spot.spot_job_id
- FROM spot
- LEFT OUTER JOIN job_info
- ON spot.spot_job_id=job_info.spot_job_id
- WHERE (
- -- non-legacy jobs that are not DONE
- (job_info.schedule_state IS NOT NULL AND
- job_info.schedule_state IS NOT ?)
- OR
- -- legacy or that are in non-terminal status or
- -- DONE jobs that are in non-terminal status
- ((-- legacy jobs
- job_info.schedule_state IS NULL OR
- -- non-legacy DONE jobs
- job_info.schedule_state IS ?
- ) AND
- -- non-terminal
- status NOT IN ({status_filter_str}))
- )
- {job_filter}
- ORDER BY spot.spot_job_id DESC""", [
- ManagedJobScheduleState.DONE.value,
- ManagedJobScheduleState.DONE.value, *terminal_status_values,
- *job_value
- ]).fetchall()
- return [row[0] for row in rows if row[0] is not None]
+ assert _SQLALCHEMY_ENGINE is not None

+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ terminal_status_values = [
+ status.value for status in ManagedJobStatus.terminal_statuses()
+ ]

- def get_all_job_ids_by_name(name: Optional[str]) -> List[int]:
- """Get all job ids by name."""
- name_filter = ''
- field_values = []
- if name is not None:
- # We match the job name from `job_info` for the jobs submitted after
- # #1982, and from `spot` for the jobs submitted before #1982, whose
- # job_info is not available.
- name_filter = ('WHERE (job_info.name=(?) OR '
- '(job_info.name IS NULL AND spot.task_name=(?)))')
- field_values = [name, name]
-
- # Left outer join is used here instead of join, because the job_info does
- # not contain the managed jobs submitted before #1982.
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- rows = cursor.execute(
- f"""\
- SELECT DISTINCT spot.spot_job_id
- FROM spot
- LEFT OUTER JOIN job_info
- ON spot.spot_job_id=job_info.spot_job_id
- {name_filter}
- ORDER BY spot.spot_job_id DESC""", field_values).fetchall()
- job_ids = [row[0] for row in rows if row[0] is not None]
- return job_ids
+ query = sqlalchemy.select(
+ spot_table.c.spot_job_id.distinct()).select_from(
+ spot_table.outerjoin(
+ job_info_table,
+ spot_table.c.spot_job_id == job_info_table.c.spot_job_id))
+
+ # Get jobs that are either:
+ # 1. Have schedule state that is not DONE, or
+ # 2. Have schedule state DONE AND are in non-terminal status (unexpected
+ # inconsistent state), or
+ # 3. Have no schedule state (legacy) AND are in non-terminal status
+
+ # non-legacy jobs that are not DONE
+ condition1 = sqlalchemy.and_(
+ job_info_table.c.schedule_state.is_not(None),
+ job_info_table.c.schedule_state !=
+ ManagedJobScheduleState.DONE.value)
+ # legacy jobs that are in non-terminal status, or
+ # DONE jobs that are in non-terminal status
+ condition2 = sqlalchemy.and_(
+ sqlalchemy.or_(
+ # legacy jobs
+ job_info_table.c.schedule_state.is_(None),
+ # non-legacy DONE jobs
+ job_info_table.c.schedule_state ==
+ ManagedJobScheduleState.DONE.value),
+ # non-terminal
+ ~spot_table.c.status.in_(terminal_status_values),
+ )
+ where_condition = sqlalchemy.or_(condition1, condition2)
+ if job_id is not None:
+ where_condition = sqlalchemy.and_(
+ where_condition, spot_table.c.spot_job_id == job_id)
+
+ query = query.where(where_condition).order_by(
+ spot_table.c.spot_job_id.desc())
+
+ rows = session.execute(query).fetchall()
+ return [row[0] for row in rows if row[0] is not None]


+ @_init_db
  def _get_all_task_ids_statuses(
  job_id: int) -> List[Tuple[int, ManagedJobStatus]]:
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- id_statuses = cursor.execute(
- """\
- SELECT task_id, status FROM spot
- WHERE spot_job_id=(?)
- ORDER BY task_id ASC""", (job_id,)).fetchall()
+ assert _SQLALCHEMY_ENGINE is not None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ id_statuses = session.execute(
+ sqlalchemy.select(
+ spot_table.c.task_id,
+ spot_table.c.status,
+ ).where(spot_table.c.spot_job_id == job_id).order_by(
+ spot_table.c.task_id.asc())).fetchall()
  return [(row[0], ManagedJobStatus(row[1])) for row in id_statuses]


+ @_init_db
+ def get_all_task_ids_names_statuses_logs(
+ job_id: int
+ ) -> List[Tuple[int, str, ManagedJobStatus, str, Optional[float]]]:
+ assert _SQLALCHEMY_ENGINE is not None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ id_names = session.execute(
+ sqlalchemy.select(
+ spot_table.c.task_id,
+ spot_table.c.task_name,
+ spot_table.c.status,
+ spot_table.c.local_log_file,
+ spot_table.c.logs_cleaned_at,
+ ).where(spot_table.c.spot_job_id == job_id).order_by(
+ spot_table.c.task_id.asc())).fetchall()
+ return [(row[0], row[1], ManagedJobStatus(row[2]), row[3], row[4])
+ for row in id_names]
+
+
  def get_num_tasks(job_id: int) -> int:
  return len(_get_all_task_ids_statuses(job_id))
 
@@ -888,31 +1136,43 @@ def get_latest_task_id_status(
  return task_id, status


+ @_init_db
+ def get_job_controller_pid(job_id: int) -> Optional[int]:
+ assert _SQLALCHEMY_ENGINE is not None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ pid = session.execute(
+ sqlalchemy.select(job_info_table.c.controller_pid).where(
+ job_info_table.c.spot_job_id == job_id)).fetchone()
+ return pid[0] if pid else None
+
+
  def get_status(job_id: int) -> Optional[ManagedJobStatus]:
  _, status = get_latest_task_id_status(job_id)
  return status


+ @_init_db
  def get_failure_reason(job_id: int) -> Optional[str]:
  """Get the failure reason of a job.

  If the job has multiple tasks, we return the first failure reason.
  """
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- reason = cursor.execute(
- """\
- SELECT failure_reason FROM spot
- WHERE spot_job_id=(?)
- ORDER BY task_id ASC""", (job_id,)).fetchall()
+ assert _SQLALCHEMY_ENGINE is not None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ reason = session.execute(
+ sqlalchemy.select(spot_table.c.failure_reason).where(
+ spot_table.c.spot_job_id == job_id).order_by(
+ spot_table.c.task_id.asc())).fetchall()
  reason = [r[0] for r in reason if r[0] is not None]
  if not reason:
  return None
  return reason[0]


- def get_managed_jobs(job_id: Optional[int] = None) -> List[Dict[str, Any]]:
- """Get managed jobs from the database."""
- job_filter = '' if job_id is None else f'WHERE spot.spot_job_id={job_id}'
+ @_init_db
+ def get_managed_job_tasks(job_id: int) -> List[Dict[str, Any]]:
+ """Get managed job tasks for a specific managed job id from the database."""
+ assert _SQLALCHEMY_ENGINE is not None

  # Join spot and job_info tables to get the job name for each task.
  # We use LEFT OUTER JOIN mainly for backward compatibility, as for an
@@ -922,197 +1182,1328 @@ def get_managed_jobs(job_id: Optional[int] = None) -> List[Dict[str, Any]]:
  # Note: we will get the user_hash here, but don't try to call
  # global_user_state.get_user() on it. This runs on the controller, which may
  # not have the user info. Prefer to do it on the API server side.
- with db_utils.safe_cursor(_DB_PATH) as cursor:
- rows = cursor.execute(f"""\
- SELECT *
- FROM spot
- LEFT OUTER JOIN job_info
- ON spot.spot_job_id=job_info.spot_job_id
- {job_filter}
- ORDER BY spot.spot_job_id DESC, spot.task_id ASC""").fetchall()
- jobs = []
- for row in rows:
- job_dict = dict(zip(columns, row))
+ query = sqlalchemy.select(spot_table, job_info_table).select_from(
+ spot_table.outerjoin(
+ job_info_table,
+ spot_table.c.spot_job_id == job_info_table.c.spot_job_id))
+ query = query.where(spot_table.c.spot_job_id == job_id)
+ query = query.order_by(spot_table.c.task_id.asc())
+ rows = None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ rows = session.execute(query).fetchall()
+ jobs = []
+ for row in rows:
+ job_dict = _get_jobs_dict(row._mapping) # pylint: disable=protected-access
+ job_dict['status'] = ManagedJobStatus(job_dict['status'])
+ job_dict['schedule_state'] = ManagedJobScheduleState(
+ job_dict['schedule_state'])
+ if job_dict['job_name'] is None:
+ job_dict['job_name'] = job_dict['task_name']
+ job_dict['metadata'] = json.loads(job_dict['metadata'])
+
+ # Add user YAML content for managed jobs.
+ job_dict['user_yaml'] = job_dict.get('original_user_yaml_content')
+ if job_dict['user_yaml'] is None:
+ # Backwards compatibility - try to read from file path
+ yaml_path = job_dict.get('original_user_yaml_path')
+ if yaml_path:
+ try:
+ with open(yaml_path, 'r', encoding='utf-8') as f:
+ job_dict['user_yaml'] = f.read()
+ except (FileNotFoundError, IOError, OSError) as e:
+ logger.debug('Failed to read original user YAML for job '
+ f'{job_id} from {yaml_path}: {e}')
+
+ jobs.append(job_dict)
+ return jobs
+
+
+ def _map_response_field_to_db_column(field: str):
+ """Map the response field name to an actual SQLAlchemy ColumnElement.
+
+ This ensures we never pass plain strings to SQLAlchemy 2.0 APIs like
+ Select.with_only_columns().
+ """
+ # Explicit aliases differing from actual DB column names
+ alias_mapping = {
+ '_job_id': spot_table.c.job_id, # spot.job_id
+ '_task_name': spot_table.c.job_name, # deprecated, from spot table
+ 'job_id': spot_table.c.spot_job_id, # public job id -> spot.spot_job_id
+ '_job_info_job_id': job_info_table.c.spot_job_id,
+ 'job_name': job_info_table.c.name, # public job name -> job_info.name
+ }
+ if field in alias_mapping:
+ return alias_mapping[field]
+
+ # Try direct match on the `spot` table columns
+ if field in spot_table.c:
+ return spot_table.c[field]
+
+ # Try direct match on the `job_info` table columns
+ if field in job_info_table.c:
+ return job_info_table.c[field]
+
+ raise ValueError(f'Unknown field: {field}')
+
+
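Editor's note: the mapper exists because SQLAlchemy 2.0's `Select.with_only_columns()` takes column expressions, not strings. A short usage sketch with hypothetical field names, relying on the tables defined in this module:

    # Hypothetical request: trim the joined SELECT down to three fields.
    fields = ['job_id', 'job_name', 'status']
    columns = [_map_response_field_to_db_column(f) for f in fields]
    query = sqlalchemy.select(spot_table, job_info_table).select_from(
        spot_table.outerjoin(
            job_info_table,
            spot_table.c.spot_job_id == job_info_table.c.spot_job_id))
    # ColumnElements resolved above; passing raw strings here would fail.
    query = query.with_only_columns(*columns)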
+ @_init_db
+ def get_managed_jobs_total() -> int:
+ """Get the total number of managed jobs."""
+ assert _SQLALCHEMY_ENGINE is not None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ result = session.execute(
+ sqlalchemy.select(sqlalchemy.func.count() # pylint: disable=not-callable
+ ).select_from(spot_table)).fetchone()
+ return result[0] if result else 0
+
+
+ @_init_db
+ def get_managed_jobs_highest_priority() -> int:
+ """Get the highest priority of the managed jobs."""
+ assert _SQLALCHEMY_ENGINE is not None
+ query = sqlalchemy.select(sqlalchemy.func.max(
+ job_info_table.c.priority)).where(
+ sqlalchemy.and_(
+ job_info_table.c.schedule_state.in_([
+ ManagedJobScheduleState.LAUNCHING.value,
+ ManagedJobScheduleState.ALIVE_BACKOFF.value,
+ ManagedJobScheduleState.WAITING.value,
+ ManagedJobScheduleState.ALIVE_WAITING.value,
+ ]),
+ job_info_table.c.priority.is_not(None),
+ ))
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ priority = session.execute(query).fetchone()
+ return priority[0] if priority and priority[
+ 0] is not None else constants.MIN_PRIORITY
+
+
+ def build_managed_jobs_with_filters_no_status_query(
+ fields: Optional[List[str]] = None,
+ job_ids: Optional[List[int]] = None,
+ accessible_workspaces: Optional[List[str]] = None,
+ workspace_match: Optional[str] = None,
+ name_match: Optional[str] = None,
+ pool_match: Optional[str] = None,
+ user_hashes: Optional[List[Optional[str]]] = None,
+ skip_finished: bool = False,
+ count_only: bool = False,
+ status_count: bool = False,
+ ) -> sqlalchemy.Select:
+ """Build a query to get managed jobs from the database with filters."""
+ # Join spot and job_info tables to get the job name for each task.
+ # We use LEFT OUTER JOIN mainly for backward compatibility, as for an
+ # existing controller before #1982, the job_info table may not exist,
+ # and all the managed jobs created before will not present in the
+ # job_info.
+ # Note: we will get the user_hash here, but don't try to call
+ # global_user_state.get_user() on it. This runs on the controller, which may
+ # not have the user info. Prefer to do it on the API server side.
+ if count_only:
+ query = sqlalchemy.select(sqlalchemy.func.count().label('count')) # pylint: disable=not-callable
+ elif status_count:
+ query = sqlalchemy.select(spot_table.c.status,
+ sqlalchemy.func.count().label('count')) # pylint: disable=not-callable
+ else:
+ query = sqlalchemy.select(spot_table, job_info_table)
+ query = query.select_from(
+ spot_table.outerjoin(
+ job_info_table,
+ spot_table.c.spot_job_id == job_info_table.c.spot_job_id))
+ if skip_finished:
+ # Filter out finished jobs at the DB level. If a multi-task job is
+ # partially finished, include all its tasks. We do this by first
+ # selecting job_ids that have at least one non-terminal task, then
+ # restricting the main query to those job_ids.
+ terminal_status_values = [
+ s.value for s in ManagedJobStatus.terminal_statuses()
+ ]
+ non_terminal_job_ids_subquery = (sqlalchemy.select(
+ spot_table.c.spot_job_id).where(
+ sqlalchemy.or_(
+ spot_table.c.status.is_(None),
+ sqlalchemy.not_(
+ spot_table.c.status.in_(terminal_status_values)),
+ )).distinct())
+ query = query.where(
+ spot_table.c.spot_job_id.in_(non_terminal_job_ids_subquery))
+ if not count_only and not status_count and fields:
+ # Resolve requested field names to explicit ColumnElements from
+ # the joined tables.
+ selected_columns = [_map_response_field_to_db_column(f) for f in fields]
+ query = query.with_only_columns(*selected_columns)
+ if job_ids is not None:
+ query = query.where(spot_table.c.spot_job_id.in_(job_ids))
+ if accessible_workspaces is not None:
+ query = query.where(
+ job_info_table.c.workspace.in_(accessible_workspaces))
+ if workspace_match is not None:
+ query = query.where(
+ job_info_table.c.workspace.like(f'%{workspace_match}%'))
+ if name_match is not None:
+ query = query.where(job_info_table.c.name.like(f'%{name_match}%'))
+ if pool_match is not None:
+ query = query.where(job_info_table.c.pool.like(f'%{pool_match}%'))
+ if user_hashes is not None:
+ query = query.where(job_info_table.c.user_hash.in_(user_hashes))
+ return query
+
+
+ def build_managed_jobs_with_filters_query(
+ fields: Optional[List[str]] = None,
+ job_ids: Optional[List[int]] = None,
+ accessible_workspaces: Optional[List[str]] = None,
+ workspace_match: Optional[str] = None,
+ name_match: Optional[str] = None,
+ pool_match: Optional[str] = None,
+ user_hashes: Optional[List[Optional[str]]] = None,
+ statuses: Optional[List[str]] = None,
+ skip_finished: bool = False,
+ count_only: bool = False,
+ ) -> sqlalchemy.Select:
+ """Build a query to get managed jobs from the database with filters."""
+ query = build_managed_jobs_with_filters_no_status_query(
+ fields=fields,
+ job_ids=job_ids,
+ accessible_workspaces=accessible_workspaces,
+ workspace_match=workspace_match,
+ name_match=name_match,
+ pool_match=pool_match,
+ user_hashes=user_hashes,
+ skip_finished=skip_finished,
+ count_only=count_only,
+ )
+ if statuses is not None:
+ query = query.where(spot_table.c.status.in_(statuses))
+ return query
+
+
+ @_init_db
+ def get_status_count_with_filters(
+ fields: Optional[List[str]] = None,
+ job_ids: Optional[List[int]] = None,
+ accessible_workspaces: Optional[List[str]] = None,
+ workspace_match: Optional[str] = None,
+ name_match: Optional[str] = None,
+ pool_match: Optional[str] = None,
+ user_hashes: Optional[List[Optional[str]]] = None,
+ skip_finished: bool = False,
+ ) -> Dict[str, int]:
+ """Get the status count of the managed jobs with filters."""
+ query = build_managed_jobs_with_filters_no_status_query(
+ fields=fields,
+ job_ids=job_ids,
+ accessible_workspaces=accessible_workspaces,
+ workspace_match=workspace_match,
+ name_match=name_match,
+ pool_match=pool_match,
+ user_hashes=user_hashes,
+ skip_finished=skip_finished,
+ status_count=True,
+ )
+ query = query.group_by(spot_table.c.status)
+ results: Dict[str, int] = {}
+ assert _SQLALCHEMY_ENGINE is not None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ rows = session.execute(query).fetchall()
+ for status_value, count in rows:
+ # status_value is already a string (enum value)
+ results[str(status_value)] = int(count)
+ return results
+
+
+ @_init_db
+ def get_managed_jobs_with_filters(
+ fields: Optional[List[str]] = None,
+ job_ids: Optional[List[int]] = None,
+ accessible_workspaces: Optional[List[str]] = None,
+ workspace_match: Optional[str] = None,
+ name_match: Optional[str] = None,
+ pool_match: Optional[str] = None,
+ user_hashes: Optional[List[Optional[str]]] = None,
+ statuses: Optional[List[str]] = None,
+ skip_finished: bool = False,
+ page: Optional[int] = None,
+ limit: Optional[int] = None,
+ ) -> Tuple[List[Dict[str, Any]], int]:
+ """Get managed jobs from the database with filters.
+
+ Returns:
+ A tuple containing
+ - the list of managed jobs
+ - the total number of managed jobs
+ """
+ assert _SQLALCHEMY_ENGINE is not None
+
+ count_query = build_managed_jobs_with_filters_query(
+ fields=None,
+ job_ids=job_ids,
+ accessible_workspaces=accessible_workspaces,
+ workspace_match=workspace_match,
+ name_match=name_match,
+ pool_match=pool_match,
+ user_hashes=user_hashes,
+ statuses=statuses,
+ skip_finished=skip_finished,
+ count_only=True,
+ )
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ total = session.execute(count_query).fetchone()[0]
+
+ query = build_managed_jobs_with_filters_query(
+ fields=fields,
+ job_ids=job_ids,
+ accessible_workspaces=accessible_workspaces,
+ workspace_match=workspace_match,
+ name_match=name_match,
+ pool_match=pool_match,
+ user_hashes=user_hashes,
+ statuses=statuses,
+ skip_finished=skip_finished,
+ )
+ query = query.order_by(spot_table.c.spot_job_id.desc(),
+ spot_table.c.task_id.asc())
+ if page is not None and limit is not None:
+ query = query.offset((page - 1) * limit).limit(limit)
+ rows = None
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
+ rows = session.execute(query).fetchall()
+ jobs = []
+ for row in rows:
+ job_dict = _get_jobs_dict(row._mapping) # pylint: disable=protected-access
+ if job_dict.get('status') is not None:
  job_dict['status'] = ManagedJobStatus(job_dict['status'])
+ if job_dict.get('schedule_state') is not None:
  job_dict['schedule_state'] = ManagedJobScheduleState(
  job_dict['schedule_state'])
- if job_dict['job_name'] is None:
- job_dict['job_name'] = job_dict['task_name']
- jobs.append(job_dict)
- return jobs
-
-
+ if job_dict.get('job_name') is None:
+ job_dict['job_name'] = job_dict.get('task_name')
+ if job_dict.get('metadata') is not None:
+ job_dict['metadata'] = json.loads(job_dict['metadata'])
+
+ # Add user YAML content for managed jobs.
+ job_dict['user_yaml'] = job_dict.get('original_user_yaml_content')
+ if job_dict['user_yaml'] is None:
+ # Backwards compatibility - try to read from file path
+ yaml_path = job_dict.get('original_user_yaml_path')
+ if yaml_path:
+ try:
+ with open(yaml_path, 'r', encoding='utf-8') as f:
+ job_dict['user_yaml'] = f.read()
+ except (FileNotFoundError, IOError, OSError) as e:
+ job_id = job_dict.get('job_id')
+ if job_id is not None:
+ logger.debug('Failed to read original user YAML for '
+ f'job {job_id} from {yaml_path}: {e}')
+ else:
+ logger.debug('Failed to read original user YAML from '
+ f'{yaml_path}: {e}')
+
+ jobs.append(job_dict)
+ return jobs, total
+
+
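Editor's note: pagination runs the filter twice, once as `COUNT(*)` for the total and once with `ORDER BY`/`OFFSET`/`LIMIT` for the page. The offset arithmetic assumes 1-indexed pages, sketched in isolation:

    def page_slice(query, page, limit):
        # Page 1 -> rows [0, limit); page 2 -> [limit, 2 * limit); ...
        return query.offset((page - 1) * limit).limit(limit)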
+ @_init_db
945
1507
  def get_task_name(job_id: int, task_id: int) -> str:
946
1508
  """Get the task name of a job."""
947
- with db_utils.safe_cursor(_DB_PATH) as cursor:
948
- task_name = cursor.execute(
949
- """\
950
- SELECT task_name FROM spot
951
- WHERE spot_job_id=(?)
952
- AND task_id=(?)""", (job_id, task_id)).fetchone()
1509
+ assert _SQLALCHEMY_ENGINE is not None
1510
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
1511
+ task_name = session.execute(
1512
+ sqlalchemy.select(spot_table.c.task_name).where(
1513
+ sqlalchemy.and_(
1514
+ spot_table.c.spot_job_id == job_id,
1515
+ spot_table.c.task_id == task_id,
1516
+ ))).fetchone()
953
1517
  return task_name[0]
954
1518
 
955
1519
 
1520
+ @_init_db
956
1521
  def get_latest_job_id() -> Optional[int]:
957
1522
  """Get the latest job id."""
958
- with db_utils.safe_cursor(_DB_PATH) as cursor:
959
- rows = cursor.execute("""\
960
- SELECT spot_job_id FROM spot
961
- WHERE task_id=0
962
- ORDER BY submitted_at DESC LIMIT 1""")
963
- for (job_id,) in rows:
964
- return job_id
965
- return None
1523
+ assert _SQLALCHEMY_ENGINE is not None
1524
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
1525
+ job_id = session.execute(
1526
+ sqlalchemy.select(spot_table.c.spot_job_id).where(
1527
+ spot_table.c.task_id == 0).order_by(
1528
+ spot_table.c.submitted_at.desc()).limit(1)).fetchone()
1529
+ return job_id[0] if job_id else None
966
1530
 
967
1531
 
1532
+ @_init_db
968
1533
  def get_task_specs(job_id: int, task_id: int) -> Dict[str, Any]:
969
- with db_utils.safe_cursor(_DB_PATH) as cursor:
970
- task_specs = cursor.execute(
971
- """\
972
- SELECT specs FROM spot
973
- WHERE spot_job_id=(?) AND task_id=(?)""",
974
- (job_id, task_id)).fetchone()
1534
+ assert _SQLALCHEMY_ENGINE is not None
1535
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
1536
+ task_specs = session.execute(
1537
+ sqlalchemy.select(spot_table.c.specs).where(
1538
+ sqlalchemy.and_(
1539
+ spot_table.c.spot_job_id == job_id,
1540
+ spot_table.c.task_id == task_id,
1541
+ ))).fetchone()
975
1542
  return json.loads(task_specs[0])
976
1543
 
977
1544
 
978
- def get_local_log_file(job_id: int, task_id: Optional[int]) -> Optional[str]:
979
- """Get the local log directory for a job."""
980
- filter_str = 'spot_job_id=(?)'
981
- filter_args = [job_id]
982
- if task_id is not None:
983
- filter_str += ' AND task_id=(?)'
984
- filter_args.append(task_id)
985
- with db_utils.safe_cursor(_DB_PATH) as cursor:
986
- local_log_file = cursor.execute(
987
- f'SELECT local_log_file FROM spot '
988
- f'WHERE {filter_str}', filter_args).fetchone()
989
- return local_log_file[-1] if local_log_file else None
990
-
991
-
992
- # === Scheduler state functions ===
993
- # Only the scheduler should call these functions. They may require holding the
994
- # scheduler lock to work correctly.
995
-
996
-
997
- def scheduler_set_waiting(job_id: int, dag_yaml_path: str, env_file_path: str,
998
- user_hash: str) -> None:
999
- """Do not call without holding the scheduler lock."""
1000
- with db_utils.safe_cursor(_DB_PATH) as cursor:
1001
- updated_count = cursor.execute(
1002
- 'UPDATE job_info SET '
1003
- 'schedule_state = (?), dag_yaml_path = (?), env_file_path = (?), '
1004
- ' user_hash = (?) '
1005
- 'WHERE spot_job_id = (?) AND schedule_state = (?)',
1006
- (ManagedJobScheduleState.WAITING.value, dag_yaml_path,
1007
- env_file_path, user_hash, job_id,
1008
- ManagedJobScheduleState.INACTIVE.value)).rowcount
1009
- assert updated_count == 1, (job_id, updated_count)
1010
-
1011
-
1012
- def scheduler_set_launching(job_id: int,
1013
- current_state: ManagedJobScheduleState) -> None:
1014
- """Do not call without holding the scheduler lock."""
1015
- with db_utils.safe_cursor(_DB_PATH) as cursor:
1016
- updated_count = cursor.execute(
1017
- 'UPDATE job_info SET '
1018
- 'schedule_state = (?) '
1019
- 'WHERE spot_job_id = (?) AND schedule_state = (?)',
1020
- (ManagedJobScheduleState.LAUNCHING.value, job_id,
1021
- current_state.value)).rowcount
1022
- assert updated_count == 1, (job_id, updated_count)
1023
-
1545
+ @_init_db
1546
+ def scheduler_set_waiting(job_id: int, dag_yaml_content: str,
1547
+ original_user_yaml_content: str,
1548
+ env_file_content: str, priority: int):
1549
+ """Do not call without holding the scheduler lock.
1024
1550
 
1025
- def scheduler_set_alive(job_id: int) -> None:
1026
- """Do not call without holding the scheduler lock."""
1027
- with db_utils.safe_cursor(_DB_PATH) as cursor:
1028
- updated_count = cursor.execute(
1029
- 'UPDATE job_info SET '
1030
- 'schedule_state = (?) '
1031
- 'WHERE spot_job_id = (?) AND schedule_state = (?)',
1032
- (ManagedJobScheduleState.ALIVE.value, job_id,
1033
- ManagedJobScheduleState.LAUNCHING.value)).rowcount
1034
- assert updated_count == 1, (job_id, updated_count)
1551
+ Returns: Whether this is a recovery run or not.
1552
+ If this is a recovery run, the job may already be in the WAITING
1553
+ state and the update will not change the schedule_state (hence the
1554
+ updated_count will be 0). In this case, we return True.
1555
+ Otherwise, we return False.
1556
+ """
1557
+ assert _SQLALCHEMY_ENGINE is not None
1558
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
1559
+ updated_count = session.query(job_info_table).filter(
1560
+ sqlalchemy.and_(job_info_table.c.spot_job_id == job_id,)).update({
1561
+ job_info_table.c.schedule_state:
1562
+ ManagedJobScheduleState.WAITING.value,
1563
+ job_info_table.c.dag_yaml_content: dag_yaml_content,
1564
+ job_info_table.c.original_user_yaml_content:
1565
+ (original_user_yaml_content),
1566
+ job_info_table.c.env_file_content: env_file_content,
1567
+ job_info_table.c.priority: priority,
1568
+ })
1569
+ session.commit()
1570
+ assert updated_count <= 1, (job_id, updated_count)
1571
+
1572
+
1573
+ @_init_db
1574
+ def get_job_file_contents(job_id: int) -> Dict[str, Optional[str]]:
1575
+ """Return file information and stored contents for a managed job."""
1576
+ assert _SQLALCHEMY_ENGINE is not None
1577
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
1578
+ row = session.execute(
1579
+ sqlalchemy.select(
1580
+ job_info_table.c.dag_yaml_path,
1581
+ job_info_table.c.env_file_path,
1582
+ job_info_table.c.dag_yaml_content,
1583
+ job_info_table.c.env_file_content,
1584
+ ).where(job_info_table.c.spot_job_id == job_id)).fetchone()
1585
+
1586
+ if row is None:
1587
+ return {
1588
+ 'dag_yaml_path': None,
1589
+ 'env_file_path': None,
1590
+ 'dag_yaml_content': None,
1591
+ 'env_file_content': None,
1592
+ }
1593
+
1594
+ return {
1595
+ 'dag_yaml_path': row[0],
1596
+ 'env_file_path': row[1],
1597
+ 'dag_yaml_content': row[2],
1598
+ 'env_file_content': row[3],
1599
+ }
1035
1600
 
1036
1601
 
1037
- def scheduler_set_alive_waiting(job_id: int) -> None:
1602
+ @_init_db
1603
+ def get_pool_from_job_id(job_id: int) -> Optional[str]:
1604
+ """Get the pool from the job id."""
1605
+ assert _SQLALCHEMY_ENGINE is not None
1606
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
1607
+ pool = session.execute(
1608
+ sqlalchemy.select(job_info_table.c.pool).where(
1609
+ job_info_table.c.spot_job_id == job_id)).fetchone()
1610
+ return pool[0] if pool else None
1611
+
1612
+
1613
+ @_init_db
1614
+ def set_current_cluster_name(job_id: int, current_cluster_name: str) -> None:
1615
+ """Set the current cluster name for a job."""
1616
+ assert _SQLALCHEMY_ENGINE is not None
1617
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
1618
+ session.query(job_info_table).filter(
1619
+ job_info_table.c.spot_job_id == job_id).update(
1620
+ {job_info_table.c.current_cluster_name: current_cluster_name})
1621
+ session.commit()
1622
+
1623
+
1624
+ @_init_db_async
1625
+ async def set_job_id_on_pool_cluster_async(job_id: int,
1626
+ job_id_on_pool_cluster: int) -> None:
1627
+ """Set the job id on the pool cluster for a job."""
1628
+ assert _SQLALCHEMY_ENGINE_ASYNC is not None
1629
+ async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
1630
+ await session.execute(
1631
+ sqlalchemy.update(job_info_table).
1632
+ where(job_info_table.c.spot_job_id == job_id).values({
1633
+ job_info_table.c.job_id_on_pool_cluster: job_id_on_pool_cluster
1634
+ }))
1635
+ await session.commit()
1636
+
1637
+
1638
+ @_init_db
1639
+ def get_pool_submit_info(job_id: int) -> Tuple[Optional[str], Optional[int]]:
1640
+ """Get the cluster name and job id on the pool from the managed job id."""
1641
+ assert _SQLALCHEMY_ENGINE is not None
1642
+ with orm.Session(_SQLALCHEMY_ENGINE) as session:
1643
+ info = session.execute(
1644
+ sqlalchemy.select(
1645
+ job_info_table.c.current_cluster_name,
1646
+ job_info_table.c.job_id_on_pool_cluster).where(
1647
+ job_info_table.c.spot_job_id == job_id)).fetchone()
1648
+ if info is None:
1649
+ return None, None
1650
+ return info[0], info[1]
1651
+
1652
+
1653
+ @_init_db_async
1654
+ async def get_pool_submit_info_async(
1655
+ job_id: int) -> Tuple[Optional[str], Optional[int]]:
1656
+ """Get the cluster name and job id on the pool from the managed job id."""
1657
+ assert _SQLALCHEMY_ENGINE_ASYNC is not None
1658
+ async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
1659
+ result = await session.execute(
1660
+ sqlalchemy.select(job_info_table.c.current_cluster_name,
1661
+ job_info_table.c.job_id_on_pool_cluster).where(
1662
+ job_info_table.c.spot_job_id == job_id))
1663
+ info = result.fetchone()
1664
+ if info is None:
1665
+ return None, None
1666
+ return info[0], info[1]
1667
+
1668
+
+@_init_db_async
+async def scheduler_set_launching_async(job_id: int):
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        await session.execute(
+            sqlalchemy.update(job_info_table).where(
+                sqlalchemy.and_(job_info_table.c.spot_job_id == job_id)).values(
+                    {
+                        job_info_table.c.schedule_state:
+                            ManagedJobScheduleState.LAUNCHING.value
+                    }))
+        await session.commit()
+
+
+@_init_db_async
+async def scheduler_set_alive_async(job_id: int) -> None:
     """Do not call without holding the scheduler lock."""
-    with db_utils.safe_cursor(_DB_PATH) as cursor:
-        updated_count = cursor.execute(
-            'UPDATE job_info SET '
-            'schedule_state = (?) '
-            'WHERE spot_job_id = (?) AND schedule_state = (?)',
-            (ManagedJobScheduleState.ALIVE_WAITING.value, job_id,
-             ManagedJobScheduleState.ALIVE.value)).rowcount
-        assert updated_count == 1, (job_id, updated_count)
-
-
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.update(job_info_table).where(
+                sqlalchemy.and_(
+                    job_info_table.c.spot_job_id == job_id,
+                    job_info_table.c.schedule_state ==
+                    ManagedJobScheduleState.LAUNCHING.value,
+                )).values({
+                    job_info_table.c.schedule_state:
+                        ManagedJobScheduleState.ALIVE.value
+                }))
+        changes = result.rowcount
+        await session.commit()
+        assert changes == 1, (job_id, changes)
+
+
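The LAUNCHING -> ALIVE transition above is a compare-and-swap: the UPDATE matches only while the row is still in the expected source state, and the rowcount tells the caller whether the swap happened. A minimal synchronous sketch of the same pattern, using an illustrative `jobs` table rather than the module's real schema:

    import sqlalchemy
    from sqlalchemy import orm

    engine = sqlalchemy.create_engine('sqlite://')
    metadata = sqlalchemy.MetaData()
    jobs = sqlalchemy.Table(
        'jobs', metadata,
        sqlalchemy.Column('job_id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('state', sqlalchemy.Text))
    metadata.create_all(engine)

    def transition(job_id: int, expected: str, target: str) -> bool:
        """Atomically move job_id from `expected` to `target`."""
        with orm.Session(engine) as session:
            result = session.execute(
                sqlalchemy.update(jobs).where(
                    sqlalchemy.and_(jobs.c.job_id == job_id,
                                    jobs.c.state == expected)).values(
                                        state=target))
            session.commit()
            # rowcount == 1 means we won the swap; 0 means the job was
            # never in `expected`, or another writer moved it first.
            return result.rowcount == 1

    with orm.Session(engine) as session:
        session.execute(sqlalchemy.insert(jobs).values(job_id=1,
                                                       state='LAUNCHING'))
        session.commit()
    assert transition(1, 'LAUNCHING', 'ALIVE')
    assert not transition(1, 'LAUNCHING', 'ALIVE')  # second swap loses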
+@_init_db
 def scheduler_set_done(job_id: int, idempotent: bool = False) -> None:
     """Do not call without holding the scheduler lock."""
-    with db_utils.safe_cursor(_DB_PATH) as cursor:
-        updated_count = cursor.execute(
-            'UPDATE job_info SET '
-            'schedule_state = (?) '
-            'WHERE spot_job_id = (?) AND schedule_state != (?)',
-            (ManagedJobScheduleState.DONE.value, job_id,
-             ManagedJobScheduleState.DONE.value)).rowcount
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        updated_count = session.query(job_info_table).filter(
+            sqlalchemy.and_(
+                job_info_table.c.spot_job_id == job_id,
+                job_info_table.c.schedule_state !=
+                ManagedJobScheduleState.DONE.value,
+            )).update({
+                job_info_table.c.schedule_state:
+                    ManagedJobScheduleState.DONE.value
+            })
+        session.commit()
     if not idempotent:
         assert updated_count == 1, (job_id, updated_count)
 
 
-def set_job_controller_pid(job_id: int, pid: int):
-    with db_utils.safe_cursor(_DB_PATH) as cursor:
-        updated_count = cursor.execute(
-            'UPDATE job_info SET '
-            'controller_pid = (?) '
-            'WHERE spot_job_id = (?)', (pid, job_id)).rowcount
-        assert updated_count == 1, (job_id, updated_count)
-
-
+@_init_db
 def get_job_schedule_state(job_id: int) -> ManagedJobScheduleState:
-    with db_utils.safe_cursor(_DB_PATH) as cursor:
-        state = cursor.execute(
-            'SELECT schedule_state FROM job_info WHERE spot_job_id = (?)',
-            (job_id,)).fetchone()[0]
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        state = session.execute(
+            sqlalchemy.select(job_info_table.c.schedule_state).where(
+                job_info_table.c.spot_job_id == job_id)).fetchone()[0]
     return ManagedJobScheduleState(state)
 
 
+@_init_db
 def get_num_launching_jobs() -> int:
-    with db_utils.safe_cursor(_DB_PATH) as cursor:
-        return cursor.execute(
-            'SELECT COUNT(*) '
-            'FROM job_info '
-            'WHERE schedule_state = (?)',
-            (ManagedJobScheduleState.LAUNCHING.value,)).fetchone()[0]
-
-
-def get_num_alive_jobs() -> int:
-    with db_utils.safe_cursor(_DB_PATH) as cursor:
-        return cursor.execute(
-            'SELECT COUNT(*) '
-            'FROM job_info '
-            'WHERE schedule_state IN (?, ?, ?)',
-            (ManagedJobScheduleState.ALIVE_WAITING.value,
-             ManagedJobScheduleState.LAUNCHING.value,
-             ManagedJobScheduleState.ALIVE.value)).fetchone()[0]
-
-
-def get_waiting_job() -> Optional[Dict[str, Any]]:
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        return session.execute(
+            sqlalchemy.select(
+                sqlalchemy.func.count()  # pylint: disable=not-callable
+            ).select_from(job_info_table).where(
+                sqlalchemy.and_(
+                    job_info_table.c.schedule_state ==
+                    ManagedJobScheduleState.LAUNCHING.value,
+                    # Only count jobs that are not in a pool: jobs in a
+                    # pool do not actually call sky.launch.
+                    job_info_table.c.pool.is_(None)))).fetchone()[0]
+
+
+@_init_db
+def get_num_alive_jobs(pool: Optional[str] = None) -> int:
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        where_conditions = [
+            job_info_table.c.schedule_state.in_([
+                ManagedJobScheduleState.ALIVE_WAITING.value,
+                ManagedJobScheduleState.LAUNCHING.value,
+                ManagedJobScheduleState.ALIVE.value,
+                ManagedJobScheduleState.ALIVE_BACKOFF.value,
+            ])
+        ]
+
+        if pool is not None:
+            where_conditions.append(job_info_table.c.pool == pool)
+
+        return session.execute(
+            sqlalchemy.select(
+                sqlalchemy.func.count()  # pylint: disable=not-callable
+            ).select_from(job_info_table).where(
+                sqlalchemy.and_(*where_conditions))).fetchone()[0]
+
+
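get_num_alive_jobs builds its WHERE clause as a plain Python list so the optional pool filter can be appended before sqlalchemy.and_(*where_conditions) combines them. A runnable sketch of that composition pattern, on an illustrative schema:

    from typing import Optional

    import sqlalchemy

    engine = sqlalchemy.create_engine('sqlite://')
    metadata = sqlalchemy.MetaData()
    jobs = sqlalchemy.Table(
        'jobs', metadata,
        sqlalchemy.Column('job_id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('state', sqlalchemy.Text),
        sqlalchemy.Column('pool', sqlalchemy.Text, nullable=True))
    metadata.create_all(engine)

    def count_alive(pool: Optional[str] = None) -> int:
        conditions = [jobs.c.state.in_(['LAUNCHING', 'ALIVE'])]
        if pool is not None:
            # Optional filters are appended before and_() joins them all.
            conditions.append(jobs.c.pool == pool)
        with engine.connect() as conn:
            return conn.execute(
                sqlalchemy.select(sqlalchemy.func.count()).select_from(
                    jobs).where(sqlalchemy.and_(*conditions))).scalar_one()

    with engine.begin() as conn:
        conn.execute(sqlalchemy.insert(jobs), [
            {'job_id': 1, 'state': 'ALIVE', 'pool': 'p1'},
            {'job_id': 2, 'state': 'ALIVE', 'pool': None},
            {'job_id': 3, 'state': 'DONE', 'pool': 'p1'},
        ])
    assert count_alive() == 2
    assert count_alive('p1') == 1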
+@_init_db
+def get_nonterminal_job_ids_by_pool(pool: str,
+                                    cluster_name: Optional[str] = None
+                                   ) -> List[int]:
+    """Get nonterminal job ids in a pool."""
+    assert _SQLALCHEMY_ENGINE is not None
+
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        query = sqlalchemy.select(
+            spot_table.c.spot_job_id.distinct()).select_from(
+                spot_table.outerjoin(
+                    job_info_table,
+                    spot_table.c.spot_job_id == job_info_table.c.spot_job_id))
+        and_conditions = [
+            ~spot_table.c.status.in_([
+                status.value for status in ManagedJobStatus.terminal_statuses()
+            ]),
+            job_info_table.c.pool == pool,
+        ]
+        if cluster_name is not None:
+            and_conditions.append(
+                job_info_table.c.current_cluster_name == cluster_name)
+        query = query.where(sqlalchemy.and_(*and_conditions)).order_by(
+            spot_table.c.spot_job_id.asc())
+        rows = session.execute(query).fetchall()
+        job_ids = [row[0] for row in rows if row[0] is not None]
+        return job_ids
+
+
+@_init_db_async
+async def get_waiting_job_async(pid: int) -> Optional[Dict[str, Any]]:
     """Get the next job that should transition to LAUNCHING.
 
+    Selects the highest-priority WAITING or ALIVE_WAITING job and atomically
+    transitions it to LAUNCHING state to prevent race conditions.
+
+    Returns the job information if a job was successfully transitioned to
+    LAUNCHING, or None if no suitable job was found.
+
     Backwards compatibility note: jobs submitted before #4485 will have no
     schedule_state and will be ignored by this SQL query.
     """
-    with db_utils.safe_cursor(_DB_PATH) as cursor:
-        row = cursor.execute(
-            'SELECT spot_job_id, schedule_state, dag_yaml_path, env_file_path '
-            'FROM job_info '
-            'WHERE schedule_state in (?, ?) '
-            'ORDER BY spot_job_id LIMIT 1',
-            (ManagedJobScheduleState.WAITING.value,
-             ManagedJobScheduleState.ALIVE_WAITING.value)).fetchone()
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        # Select the highest priority waiting job for update (locks the row)
+        select_query = sqlalchemy.select(
+            job_info_table.c.spot_job_id,
+            job_info_table.c.schedule_state,
+            job_info_table.c.dag_yaml_path,
+            job_info_table.c.env_file_path,
+            job_info_table.c.controller_pid,
+            job_info_table.c.pool,
+        ).where(
+            job_info_table.c.schedule_state.in_([
+                ManagedJobScheduleState.WAITING.value,
+            ])).order_by(
+                job_info_table.c.priority.desc(),
+                job_info_table.c.spot_job_id.asc(),
+            ).limit(1).with_for_update()
+
+        # Execute the select with row locking
+        result = await session.execute(select_query)
+        waiting_job_row = result.fetchone()
+
+        if waiting_job_row is None:
+            return None
+
+        job_id = waiting_job_row[0]
+        current_state = ManagedJobScheduleState(waiting_job_row[1])
+        dag_yaml_path = waiting_job_row[2]
+        env_file_path = waiting_job_row[3]
+        controller_pid = waiting_job_row[4]
+        pool = waiting_job_row[5]
+
+        # Update the job state to LAUNCHING
+        update_result = await session.execute(
+            sqlalchemy.update(job_info_table).where(
+                sqlalchemy.and_(
+                    job_info_table.c.spot_job_id == job_id,
+                    job_info_table.c.schedule_state == current_state.value,
+                )).values({
+                    job_info_table.c.schedule_state:
+                        ManagedJobScheduleState.LAUNCHING.value,
+                    job_info_table.c.controller_pid: pid,
+                }))
+
+        if update_result.rowcount != 1:
+            # Update failed, rollback and return None
+            await session.rollback()
+            return None
+
+        # Commit the transaction
+        await session.commit()
+
         return {
+            'job_id': job_id,
+            'schedule_state': current_state,
+            'dag_yaml_path': dag_yaml_path,
+            'env_file_path': env_file_path,
+            'old_pid': controller_pid,
+            'pool': pool,
+        }
+
+
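get_waiting_job_async claims a job in two steps inside one transaction: lock the candidate row with SELECT ... FOR UPDATE (SQLAlchemy's SQLite dialect ignores FOR UPDATE and relies on the database-level write lock instead), then re-check the state in the UPDATE's WHERE clause and roll back if the rowcount shows another scheduler won the race. A compact synchronous sketch of the claim, with illustrative table and state names:

    from typing import Optional

    import sqlalchemy
    from sqlalchemy import orm

    engine = sqlalchemy.create_engine('sqlite://')
    metadata = sqlalchemy.MetaData()
    jobs = sqlalchemy.Table(
        'jobs', metadata,
        sqlalchemy.Column('job_id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('state', sqlalchemy.Text),
        sqlalchemy.Column('priority', sqlalchemy.Integer))
    metadata.create_all(engine)

    def claim_next_waiting() -> Optional[int]:
        with orm.Session(engine) as session:
            row = session.execute(
                sqlalchemy.select(jobs.c.job_id, jobs.c.state).where(
                    jobs.c.state == 'WAITING').order_by(
                        jobs.c.priority.desc(),
                        jobs.c.job_id.asc()).limit(1).with_for_update()
            ).fetchone()
            if row is None:
                return None  # nothing is waiting
            result = session.execute(
                sqlalchemy.update(jobs).where(
                    sqlalchemy.and_(jobs.c.job_id == row[0],
                                    jobs.c.state == row[1])).values(
                                        state='LAUNCHING'))
            if result.rowcount != 1:
                # Lost the race: another scheduler claimed the job.
                session.rollback()
                return None
            session.commit()
            return row[0]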
+@_init_db
+def get_workspace(job_id: int) -> str:
+    """Get the workspace of a job."""
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        workspace = session.execute(
+            sqlalchemy.select(job_info_table.c.workspace).where(
+                job_info_table.c.spot_job_id == job_id)).fetchone()
+        job_workspace = workspace[0] if workspace else None
+        if job_workspace is None:
+            return constants.SKYPILOT_DEFAULT_WORKSPACE
+        return job_workspace
+
+
+# === HA Recovery Script functions ===
+
+
+@_init_db
+def get_ha_recovery_script(job_id: int) -> Optional[str]:
+    """Get the HA recovery script for a job."""
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        row = session.query(ha_recovery_script_table).filter_by(
+            job_id=job_id).first()
+        if row is None:
+            return None
+        return row.script
+
+
+@_init_db
+def set_ha_recovery_script(job_id: int, script: str) -> None:
+    """Set the HA recovery script for a job."""
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        if (_SQLALCHEMY_ENGINE.dialect.name ==
+                db_utils.SQLAlchemyDialect.SQLITE.value):
+            insert_func = sqlite.insert
+        elif (_SQLALCHEMY_ENGINE.dialect.name ==
+              db_utils.SQLAlchemyDialect.POSTGRESQL.value):
+            insert_func = postgresql.insert
+        else:
+            raise ValueError('Unsupported database dialect')
+        insert_stmt = insert_func(ha_recovery_script_table).values(
+            job_id=job_id, script=script)
+        do_update_stmt = insert_stmt.on_conflict_do_update(
+            index_elements=[ha_recovery_script_table.c.job_id],
+            set_={ha_recovery_script_table.c.script: script})
+        session.execute(do_update_stmt)
+        session.commit()
+
+
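set_ha_recovery_script has to pick the insert construct by dialect because ON CONFLICT upserts are dialect extensions in SQLAlchemy, not part of core. A self-contained sketch of the same upsert on SQLite (the PostgreSQL branch differs only in the import); the `scripts` table here is illustrative:

    import sqlalchemy
    from sqlalchemy.dialects import sqlite

    engine = sqlalchemy.create_engine('sqlite://')
    metadata = sqlalchemy.MetaData()
    scripts = sqlalchemy.Table(
        'scripts', metadata,
        sqlalchemy.Column('job_id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('script', sqlalchemy.Text))
    metadata.create_all(engine)

    def upsert_script(job_id: int, script: str) -> None:
        insert_stmt = sqlite.insert(scripts).values(job_id=job_id,
                                                    script=script)
        # INSERT ... ON CONFLICT (job_id) DO UPDATE SET script = ...
        stmt = insert_stmt.on_conflict_do_update(
            index_elements=[scripts.c.job_id],
            set_={scripts.c.script: script})
        with engine.begin() as conn:
            conn.execute(stmt)

    upsert_script(1, 'echo v1')
    upsert_script(1, 'echo v2')  # second call updates the row in place
    with engine.connect() as conn:
        assert conn.execute(
            sqlalchemy.select(scripts.c.script)).scalar_one() == 'echo v2'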
+@_init_db
+def remove_ha_recovery_script(job_id: int) -> None:
+    """Remove the HA recovery script for a job."""
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        session.query(ha_recovery_script_table).filter_by(
+            job_id=job_id).delete()
+        session.commit()
+
+
+@_init_db_async
+async def get_latest_task_id_status_async(
+        job_id: int) -> Union[Tuple[int, ManagedJobStatus], Tuple[None, None]]:
+    """Returns the (task id, status) of the latest task of a job."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.select(
+                spot_table.c.task_id,
+                spot_table.c.status,
+            ).where(spot_table.c.spot_job_id == job_id).order_by(
+                spot_table.c.task_id.asc()))
+        id_statuses = [
+            (row[0], ManagedJobStatus(row[1])) for row in result.fetchall()
+        ]
+
+        if not id_statuses:
+            return None, None
+        task_id, status = next(
+            ((tid, st) for tid, st in id_statuses if not st.is_terminal()),
+            id_statuses[-1],
+        )
+        return task_id, status
+
+
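The next(...) call above returns the first non-terminal task if one exists and otherwise falls back to the last task of the job, since next's second argument is the default used when the generator is exhausted. The selection logic in isolation, with illustrative status strings:

    TERMINAL = {'SUCCEEDED', 'FAILED', 'CANCELLED'}

    def latest_task(id_statuses):
        """First non-terminal task, else the last task of the job."""
        if not id_statuses:
            return None, None
        return next(((tid, st) for tid, st in id_statuses
                     if st not in TERMINAL), id_statuses[-1])

    # Task 0 finished but task 1 is still running -> task 1 is "latest".
    assert latest_task([(0, 'SUCCEEDED'), (1, 'RUNNING')]) == (1, 'RUNNING')
    # All tasks terminal -> fall back to the last one.
    assert latest_task([(0, 'SUCCEEDED'), (1, 'FAILED')]) == (1, 'FAILED')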
+@_init_db_async
+async def set_starting_async(job_id: int, task_id: int, run_timestamp: str,
+                             submit_time: float, resources_str: str,
+                             specs: Dict[str, Union[str, int]],
+                             callback_func: AsyncCallbackType):
+    """Set the task to starting state."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    logger.info('Launching the spot cluster...')
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.update(spot_table).where(
+                sqlalchemy.and_(
+                    spot_table.c.spot_job_id == job_id,
+                    spot_table.c.task_id == task_id,
+                    spot_table.c.status == ManagedJobStatus.PENDING.value,
+                    spot_table.c.end_at.is_(None),
+                )).values({
+                    spot_table.c.resources: resources_str,
+                    spot_table.c.submitted_at: submit_time,
+                    spot_table.c.status: ManagedJobStatus.STARTING.value,
+                    spot_table.c.run_timestamp: run_timestamp,
+                    spot_table.c.specs: json.dumps(specs),
+                }))
+        count = result.rowcount
+        await session.commit()
+        if count != 1:
+            details = await _describe_task_transition_failure(
+                session, job_id, task_id)
+            message = ('Failed to set the task to starting. '
+                       f'({count} rows updated. {details})')
+            logger.error(message)
+            raise exceptions.ManagedJobStatusError(message)
+        await callback_func('SUBMITTED')
+        await callback_func('STARTING')
+
+
+@_init_db_async
+async def set_started_async(job_id: int, task_id: int, start_time: float,
+                            callback_func: AsyncCallbackType):
+    """Set the task to started state."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    logger.info('Job started.')
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.update(spot_table).where(
+                sqlalchemy.and_(
+                    spot_table.c.spot_job_id == job_id,
+                    spot_table.c.task_id == task_id,
+                    spot_table.c.status.in_([
+                        ManagedJobStatus.STARTING.value,
+                        ManagedJobStatus.PENDING.value
+                    ]),
+                    spot_table.c.end_at.is_(None),
+                )).values({
+                    spot_table.c.status: ManagedJobStatus.RUNNING.value,
+                    spot_table.c.start_at: start_time,
+                    spot_table.c.last_recovered_at: start_time,
+                }))
+        count = result.rowcount
+        await session.commit()
+        if count != 1:
+            details = await _describe_task_transition_failure(
+                session, job_id, task_id)
+            message = (f'Failed to set the task to started. '
+                       f'({count} rows updated. {details})')
+            logger.error(message)
+            raise exceptions.ManagedJobStatusError(message)
+        await callback_func('STARTED')
+
+
+@_init_db_async
+async def get_job_status_with_task_id_async(
+        job_id: int, task_id: int) -> Optional[ManagedJobStatus]:
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.select(spot_table.c.status).where(
+                sqlalchemy.and_(spot_table.c.spot_job_id == job_id,
+                                spot_table.c.task_id == task_id)))
+        status = result.fetchone()
+        return ManagedJobStatus(status[0]) if status else None
+
+
+@_init_db_async
+async def set_recovering_async(job_id: int, task_id: int,
+                               force_transit_to_recovering: bool,
+                               callback_func: AsyncCallbackType):
+    """Set the task to recovering state, and update the job duration."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    logger.info('=== Recovering... ===')
+    current_time = time.time()
+
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        if force_transit_to_recovering:
+            status_condition = spot_table.c.status.in_(
+                [s.value for s in ManagedJobStatus.processing_statuses()])
+        else:
+            status_condition = (
+                spot_table.c.status == ManagedJobStatus.RUNNING.value)
+
+        result = await session.execute(
+            sqlalchemy.update(spot_table).where(
+                sqlalchemy.and_(
+                    spot_table.c.spot_job_id == job_id,
+                    spot_table.c.task_id == task_id,
+                    status_condition,
+                    spot_table.c.end_at.is_(None),
+                )).values({
+                    spot_table.c.status: ManagedJobStatus.RECOVERING.value,
+                    spot_table.c.job_duration: sqlalchemy.case(
+                        (spot_table.c.last_recovered_at >= 0,
+                         spot_table.c.job_duration + current_time -
+                         spot_table.c.last_recovered_at),
+                        else_=spot_table.c.job_duration),
+                    spot_table.c.last_recovered_at: sqlalchemy.case(
+                        (spot_table.c.last_recovered_at < 0, current_time),
+                        else_=spot_table.c.last_recovered_at),
+                }))
+        count = result.rowcount
+        await session.commit()
+        if count != 1:
+            details = await _describe_task_transition_failure(
+                session, job_id, task_id)
+            message = ('Failed to set the task to recovering with '
+                       'force_transit_to_recovering='
+                       f'{force_transit_to_recovering}. '
+                       f'({count} rows updated. {details})')
+            logger.error(message)
+            raise exceptions.ManagedJobStatusError(message)
+        await callback_func('RECOVERING')
+
+
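The two sqlalchemy.case() expressions above keep the duration bookkeeping inside the UPDATE so it stays atomic with the status flip: when last_recovered_at holds a real timestamp (>= 0) the elapsed run time is folded into job_duration, while the sentinel (< 0, i.e. the task never actually started) just gets stamped with the current time. A sketch of the same CASE arithmetic on a toy table, assuming that sentinel convention:

    import time

    import sqlalchemy

    engine = sqlalchemy.create_engine('sqlite://')
    metadata = sqlalchemy.MetaData()
    tasks = sqlalchemy.Table(
        'tasks', metadata,
        sqlalchemy.Column('task_id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('job_duration', sqlalchemy.Float),
        sqlalchemy.Column('last_recovered_at', sqlalchemy.Float))
    metadata.create_all(engine)

    now = time.time()
    with engine.begin() as conn:
        conn.execute(sqlalchemy.insert(tasks), [
            {'task_id': 1, 'job_duration': 10.0,
             'last_recovered_at': now - 5},  # ran ~5s since last recovery
            {'task_id': 2, 'job_duration': 0.0,
             'last_recovered_at': -1.0},     # never started
        ])
        conn.execute(
            sqlalchemy.update(tasks).values({
                # Fold elapsed time into job_duration for started tasks.
                tasks.c.job_duration: sqlalchemy.case(
                    (tasks.c.last_recovered_at >= 0,
                     tasks.c.job_duration + now - tasks.c.last_recovered_at),
                    else_=tasks.c.job_duration),
                # Stamp never-started tasks so later math has a baseline.
                tasks.c.last_recovered_at: sqlalchemy.case(
                    (tasks.c.last_recovered_at < 0, now),
                    else_=tasks.c.last_recovered_at),
            }))
    # Task 1 accumulates ~5s (10 -> ~15); task 2 keeps duration 0.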
+@_init_db_async
+async def set_recovered_async(job_id: int, task_id: int, recovered_time: float,
+                              callback_func: AsyncCallbackType):
+    """Set the task to recovered."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.update(spot_table).where(
+                sqlalchemy.and_(
+                    spot_table.c.spot_job_id == job_id,
+                    spot_table.c.task_id == task_id,
+                    spot_table.c.status == ManagedJobStatus.RECOVERING.value,
+                    spot_table.c.end_at.is_(None),
+                )).values({
+                    spot_table.c.status: ManagedJobStatus.RUNNING.value,
+                    spot_table.c.last_recovered_at: recovered_time,
+                    spot_table.c.recovery_count:
+                        spot_table.c.recovery_count + 1,
+                }))
+        count = result.rowcount
+        await session.commit()
+        if count != 1:
+            details = await _describe_task_transition_failure(
+                session, job_id, task_id)
+            message = (f'Failed to set the task to recovered. '
+                       f'({count} rows updated. {details})')
+            logger.error(message)
+            raise exceptions.ManagedJobStatusError(message)
+        logger.info('==== Recovered. ====')
+        await callback_func('RECOVERED')
+
+
+@_init_db_async
+async def set_succeeded_async(job_id: int, task_id: int, end_time: float,
+                              callback_func: AsyncCallbackType):
+    """Set the task to succeeded, if it is in a non-terminal state."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.update(spot_table).where(
+                sqlalchemy.and_(
+                    spot_table.c.spot_job_id == job_id,
+                    spot_table.c.task_id == task_id,
+                    spot_table.c.status == ManagedJobStatus.RUNNING.value,
+                    spot_table.c.end_at.is_(None),
+                )).values({
+                    spot_table.c.status: ManagedJobStatus.SUCCEEDED.value,
+                    spot_table.c.end_at: end_time,
+                }))
+        count = result.rowcount
+        await session.commit()
+        if count != 1:
+            details = await _describe_task_transition_failure(
+                session, job_id, task_id)
+            message = (f'Failed to set the task to succeeded. '
+                       f'({count} rows updated. {details})')
+            logger.error(message)
+            raise exceptions.ManagedJobStatusError(message)
+        await callback_func('SUCCEEDED')
+        logger.info('Job succeeded.')
+
+
+@_init_db_async
+async def set_failed_async(
+    job_id: int,
+    task_id: Optional[int],
+    failure_type: ManagedJobStatus,
+    failure_reason: str,
+    callback_func: Optional[AsyncCallbackType] = None,
+    end_time: Optional[float] = None,
+    override_terminal: bool = False,
+):
+    """Set an entire job or task to failed."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    assert failure_type.is_failed(), failure_type
+    end_time = time.time() if end_time is None else end_time
+
+    fields_to_set: Dict[str, Any] = {
+        spot_table.c.status: failure_type.value,
+        spot_table.c.failure_reason: failure_reason,
+    }
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        # Get previous status
+        result = await session.execute(
+            sqlalchemy.select(
+                spot_table.c.status).where(spot_table.c.spot_job_id == job_id))
+        previous_status_row = result.fetchone()
+        previous_status = ManagedJobStatus(previous_status_row[0])
+        if previous_status == ManagedJobStatus.RECOVERING:
+            fields_to_set[spot_table.c.last_recovered_at] = end_time
+        where_conditions = [spot_table.c.spot_job_id == job_id]
+        if task_id is not None:
+            where_conditions.append(spot_table.c.task_id == task_id)
+
+        # Handle failure_reason prepending when override_terminal is True
+        if override_terminal:
+            # Get existing failure_reason with row lock to prevent race
+            # conditions
+            existing_reason_result = await session.execute(
+                sqlalchemy.select(spot_table.c.failure_reason).where(
+                    sqlalchemy.and_(*where_conditions)).with_for_update())
+            existing_reason_row = existing_reason_result.fetchone()
+            if existing_reason_row and existing_reason_row[0]:
+                # Prepend new failure reason to existing one
+                fields_to_set[spot_table.c.failure_reason] = (
+                    failure_reason + '. Previously: ' + existing_reason_row[0])
+            fields_to_set[spot_table.c.end_at] = sqlalchemy.func.coalesce(
+                spot_table.c.end_at, end_time)
+        else:
+            fields_to_set[spot_table.c.end_at] = end_time
+            where_conditions.append(spot_table.c.end_at.is_(None))
+        result = await session.execute(
+            sqlalchemy.update(spot_table).where(
+                sqlalchemy.and_(*where_conditions)).values(fields_to_set))
+        count = result.rowcount
+        await session.commit()
+        updated = count > 0
+        if callback_func and updated:
+            await callback_func('FAILED')
+        logger.info(failure_reason)
+
+
+@_init_db_async
+async def set_cancelling_async(job_id: int, callback_func: AsyncCallbackType):
+    """Set tasks in the job as cancelling, if they are in non-terminal
+    states."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.update(spot_table).where(
+                sqlalchemy.and_(
+                    spot_table.c.spot_job_id == job_id,
+                    spot_table.c.end_at.is_(None),
+                )).values(
+                    {spot_table.c.status: ManagedJobStatus.CANCELLING.value}))
+        count = result.rowcount
+        await session.commit()
+        updated = count > 0
+        if updated:
+            logger.info('Cancelling the job...')
+            await callback_func('CANCELLING')
+        else:
+            logger.info('Cancellation skipped, job is already terminal')
+
+
+@_init_db_async
+async def set_cancelled_async(job_id: int, callback_func: AsyncCallbackType):
+    """Set tasks in the job as cancelled, if they are in CANCELLING state."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.update(spot_table).where(
+                sqlalchemy.and_(
+                    spot_table.c.spot_job_id == job_id,
+                    spot_table.c.status == ManagedJobStatus.CANCELLING.value,
+                )).values({
+                    spot_table.c.status: ManagedJobStatus.CANCELLED.value,
+                    spot_table.c.end_at: time.time(),
+                }))
+        count = result.rowcount
+        await session.commit()
+        updated = count > 0
+        if updated:
+            logger.info('Job cancelled.')
+            await callback_func('CANCELLED')
+        else:
+            logger.info('Cancellation skipped, job is not CANCELLING')
+
+
+@_init_db_async
+async def remove_ha_recovery_script_async(job_id: int) -> None:
+    """Remove the HA recovery script for a job."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        await session.execute(
+            sqlalchemy.delete(ha_recovery_script_table).where(
+                ha_recovery_script_table.c.job_id == job_id))
+        await session.commit()
+
+
+async def get_status_async(job_id: int) -> Optional[ManagedJobStatus]:
+    _, status = await get_latest_task_id_status_async(job_id)
+    return status
+
+
+@_init_db_async
+async def get_job_schedule_state_async(job_id: int) -> ManagedJobScheduleState:
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.select(job_info_table.c.schedule_state).where(
+                job_info_table.c.spot_job_id == job_id))
+        state = result.fetchone()[0]
+        return ManagedJobScheduleState(state)
+
+
+@_init_db_async
+async def scheduler_set_done_async(job_id: int,
+                                   idempotent: bool = False) -> None:
+    """Do not call without holding the scheduler lock."""
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        result = await session.execute(
+            sqlalchemy.update(job_info_table).where(
+                sqlalchemy.and_(
+                    job_info_table.c.spot_job_id == job_id,
+                    job_info_table.c.schedule_state !=
+                    ManagedJobScheduleState.DONE.value,
+                )).values({
+                    job_info_table.c.schedule_state:
+                        ManagedJobScheduleState.DONE.value
+                }))
+        updated_count = result.rowcount
+        await session.commit()
+        if not idempotent:
+            assert updated_count == 1, (job_id, updated_count)
+
+
+# ==== needed for codegen ====
+# functions have no use outside of codegen, remove at your own peril
+
+
+@_init_db
+def set_job_info(job_id: int,
+                 name: str,
+                 workspace: str,
+                 entrypoint: str,
+                 pool: Optional[str],
+                 pool_hash: Optional[str],
+                 user_hash: Optional[str] = None):
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        if (_SQLALCHEMY_ENGINE.dialect.name ==
+                db_utils.SQLAlchemyDialect.SQLITE.value):
+            insert_func = sqlite.insert
+        elif (_SQLALCHEMY_ENGINE.dialect.name ==
+              db_utils.SQLAlchemyDialect.POSTGRESQL.value):
+            insert_func = postgresql.insert
+        else:
+            raise ValueError('Unsupported database dialect')
+        insert_stmt = insert_func(job_info_table).values(
+            spot_job_id=job_id,
+            name=name,
+            schedule_state=ManagedJobScheduleState.INACTIVE.value,
+            workspace=workspace,
+            entrypoint=entrypoint,
+            pool=pool,
+            pool_hash=pool_hash,
+            user_hash=user_hash,
+        )
+        session.execute(insert_stmt)
+        session.commit()
+
+
+@_init_db
+def reset_jobs_for_recovery() -> None:
+    """Remove controller PIDs for live jobs, allowing them to be recovered."""
+    assert _SQLALCHEMY_ENGINE is not None
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        session.query(job_info_table).filter(
+            # PID should be set.
+            job_info_table.c.controller_pid.isnot(None),
+            # Schedule state should be alive.
+            job_info_table.c.schedule_state.isnot(None),
+            (job_info_table.c.schedule_state !=
+             ManagedJobScheduleState.INVALID.value),
+            (job_info_table.c.schedule_state !=
+             ManagedJobScheduleState.WAITING.value),
+            (job_info_table.c.schedule_state !=
+             ManagedJobScheduleState.DONE.value),
+        ).update({
+            job_info_table.c.controller_pid: None,
+            job_info_table.c.schedule_state:
+                (ManagedJobScheduleState.WAITING.value)
+        })
+        session.commit()
+
+
+@_init_db
+def get_all_job_ids_by_name(name: Optional[str]) -> List[int]:
+    """Get all job ids by name."""
+    assert _SQLALCHEMY_ENGINE is not None
+
+    with orm.Session(_SQLALCHEMY_ENGINE) as session:
+        query = sqlalchemy.select(
+            spot_table.c.spot_job_id.distinct()).select_from(
+                spot_table.outerjoin(
+                    job_info_table,
+                    spot_table.c.spot_job_id == job_info_table.c.spot_job_id))
+        if name is not None:
+            # We match the job name from `job_info` for the jobs submitted
+            # after #1982, and from `spot` for the jobs submitted before
+            # #1982, whose job_info is not available.
+            name_condition = sqlalchemy.or_(
+                job_info_table.c.name == name,
+                sqlalchemy.and_(job_info_table.c.name.is_(None),
+                                spot_table.c.task_name == name))
+            query = query.where(name_condition)
+        query = query.order_by(spot_table.c.spot_job_id.desc())
+        rows = session.execute(query).fetchall()
+        job_ids = [row[0] for row in rows if row[0] is not None]
+        return job_ids
+
+
+@_init_db_async
+async def get_task_logs_to_clean_async(
+        retention_seconds: int, batch_size: int) -> List[Dict[str, Any]]:
+    """Get the logs of job tasks to clean.
+
+    The logs of a task will only be cleaned when:
+    - the job schedule state is DONE
+    - AND the end time of the task is older than the retention period
+    """
+
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        now = time.time()
+        result = await session.execute(
+            sqlalchemy.select(
+                spot_table.c.spot_job_id,
+                spot_table.c.task_id,
+                spot_table.c.local_log_file,
+            ).select_from(
+                spot_table.join(
+                    job_info_table,
+                    spot_table.c.spot_job_id == job_info_table.c.spot_job_id,
+                )).
+            where(
+                sqlalchemy.and_(
+                    job_info_table.c.schedule_state.is_(
+                        ManagedJobScheduleState.DONE.value),
+                    spot_table.c.end_at.isnot(None),
+                    spot_table.c.end_at < (now - retention_seconds),
+                    spot_table.c.logs_cleaned_at.is_(None),
+                    # The local log file is set AFTER the task is finished,
+                    # add this condition to ensure the entire log file has
+                    # been written.
+                    spot_table.c.local_log_file.isnot(None),
+                )).limit(batch_size))
+        rows = result.fetchall()
+        return [{
             'job_id': row[0],
-            'schedule_state': ManagedJobScheduleState(row[1]),
-            'dag_yaml_path': row[2],
-            'env_file_path': row[3],
-        } if row is not None else None
+            'task_id': row[1],
+            'local_log_file': row[2]
+        } for row in rows]
+
+
+@_init_db_async
+async def get_controller_logs_to_clean_async(
+        retention_seconds: int, batch_size: int) -> List[Dict[str, Any]]:
+    """Get the controller logs to clean.
+
+    The controller logs will only be cleaned when:
+    - the job schedule state is DONE
+    - AND the end time of the latest task is older than the retention period
+    """
+
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        now = time.time()
+
+        result = await session.execute(
+            sqlalchemy.select(job_info_table.c.spot_job_id,).select_from(
+                job_info_table.join(
+                    spot_table,
+                    job_info_table.c.spot_job_id == spot_table.c.spot_job_id,
+                )).where(
+                    sqlalchemy.and_(
+                        job_info_table.c.schedule_state.is_(
+                            ManagedJobScheduleState.DONE.value),
+                        spot_table.c.local_log_file.isnot(None),
+                        job_info_table.c.controller_logs_cleaned_at.is_(None),
+                    )).group_by(
+                        job_info_table.c.spot_job_id,
+                        job_info_table.c.current_cluster_name,
+                    ).having(
+                        sqlalchemy.func.max(
+                            spot_table.c.end_at).isnot(None),).having(
+                                sqlalchemy.func.max(spot_table.c.end_at) < (
+                                    now - retention_seconds)).limit(batch_size))
+        rows = result.fetchall()
+        return [{'job_id': row[0]} for row in rows]
+
+
+@_init_db_async
+async def set_task_logs_cleaned_async(tasks: List[Tuple[int, int]],
+                                      logs_cleaned_at: float):
+    """Set the task logs cleaned at."""
+    if not tasks:
+        return
+    # Deduplicate
+    task_keys = list(dict.fromkeys(tasks))
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        await session.execute(
+            sqlalchemy.update(spot_table).where(
+                sqlalchemy.tuple_(spot_table.c.spot_job_id,
+                                  spot_table.c.task_id).in_(task_keys)).values(
+                                      logs_cleaned_at=logs_cleaned_at))
+        await session.commit()
+
+
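set_task_logs_cleaned_async stamps a whole batch in one statement by matching composite (job_id, task_id) keys with tuple_(...).in_(...), which compiles to a row-value IN list (supported by PostgreSQL and by SQLite 3.15+). A runnable sketch on an illustrative schema:

    import time
    from typing import List, Tuple

    import sqlalchemy

    engine = sqlalchemy.create_engine('sqlite://')
    metadata = sqlalchemy.MetaData()
    tasks = sqlalchemy.Table(
        'tasks', metadata,
        sqlalchemy.Column('job_id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('task_id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('logs_cleaned_at', sqlalchemy.Float, nullable=True))
    metadata.create_all(engine)

    def mark_cleaned(keys: List[Tuple[int, int]], ts: float) -> None:
        if not keys:
            return
        keys = list(dict.fromkeys(keys))  # de-duplicate, preserving order
        with engine.begin() as conn:
            # WHERE (job_id, task_id) IN ((1, 0), (1, 1), ...)
            conn.execute(
                sqlalchemy.update(tasks).where(
                    sqlalchemy.tuple_(tasks.c.job_id,
                                      tasks.c.task_id).in_(keys)).values(
                                          logs_cleaned_at=ts))

    with engine.begin() as conn:
        conn.execute(sqlalchemy.insert(tasks), [
            {'job_id': 1, 'task_id': 0},
            {'job_id': 1, 'task_id': 1},
        ])
    mark_cleaned([(1, 0), (1, 0), (1, 1)], time.time())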
+@_init_db_async
+async def set_controller_logs_cleaned_async(job_ids: List[int],
+                                            logs_cleaned_at: float):
+    """Set the controller logs cleaned at."""
+    if not job_ids:
+        return
+    # Deduplicate
+    job_ids = list(dict.fromkeys(job_ids))
+    assert _SQLALCHEMY_ENGINE_ASYNC is not None
+    async with sql_async.AsyncSession(_SQLALCHEMY_ENGINE_ASYNC) as session:
+        await session.execute(
+            sqlalchemy.update(job_info_table).where(
+                job_info_table.c.spot_job_id.in_(job_ids)).values(
+                    controller_logs_cleaned_at=logs_cleaned_at))
+        await session.commit()