databricks-sdk 0.56.0__py3-none-any.whl → 0.58.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.

Note: this release of databricks-sdk has been flagged as potentially problematic. See the release's advisory page for details.

Files changed (31) hide show
  1. databricks/sdk/__init__.py +38 -11
  2. databricks/sdk/service/aibuilder.py +122 -17
  3. databricks/sdk/service/apps.py +15 -45
  4. databricks/sdk/service/billing.py +70 -74
  5. databricks/sdk/service/catalog.py +1898 -557
  6. databricks/sdk/service/cleanrooms.py +14 -55
  7. databricks/sdk/service/compute.py +305 -508
  8. databricks/sdk/service/dashboards.py +148 -223
  9. databricks/sdk/service/database.py +657 -127
  10. databricks/sdk/service/files.py +18 -54
  11. databricks/sdk/service/iam.py +55 -165
  12. databricks/sdk/service/jobs.py +238 -214
  13. databricks/sdk/service/marketplace.py +47 -146
  14. databricks/sdk/service/ml.py +1137 -447
  15. databricks/sdk/service/oauth2.py +17 -46
  16. databricks/sdk/service/pipelines.py +93 -69
  17. databricks/sdk/service/provisioning.py +34 -212
  18. databricks/sdk/service/qualitymonitorv2.py +5 -33
  19. databricks/sdk/service/serving.py +69 -55
  20. databricks/sdk/service/settings.py +106 -434
  21. databricks/sdk/service/sharing.py +33 -95
  22. databricks/sdk/service/sql.py +164 -254
  23. databricks/sdk/service/vectorsearch.py +13 -62
  24. databricks/sdk/service/workspace.py +36 -110
  25. databricks/sdk/version.py +1 -1
  26. {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/METADATA +1 -1
  27. {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/RECORD +31 -31
  28. {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/WHEEL +0 -0
  29. {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/licenses/LICENSE +0 -0
  30. {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/licenses/NOTICE +0 -0
  31. {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/top_level.txt +0 -0
@@ -219,11 +219,6 @@ class BaseRun:
219
219
  """The URL to the detail page of the run."""
220
220
 
221
221
  run_type: Optional[RunType] = None
222
- """The type of a run. * `JOB_RUN`: Normal job run. A run created with :method:jobs/runNow. *
223
- `WORKFLOW_RUN`: Workflow run. A run created with [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit
224
- run. A run created with :method:jobs/submit.
225
-
226
- [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow"""
227
222
 
228
223
  schedule: Optional[CronSchedule] = None
229
224
  """The cron schedule that triggered this run if it was triggered by the periodic scheduler."""
@@ -244,7 +239,6 @@ class BaseRun:
244
239
  """Deprecated. Please use the `status` field instead."""
245
240
 
246
241
  status: Optional[RunStatus] = None
247
- """The current status of the run"""
248
242
 
249
243
  tasks: Optional[List[RunTask]] = None
250
244
  """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
@@ -253,19 +247,8 @@ class BaseRun:
253
247
  root to determine if more results are available."""
254
248
 
255
249
  trigger: Optional[TriggerType] = None
256
- """The type of trigger that fired this run.
257
-
258
- * `PERIODIC`: Schedules that periodically trigger runs, such as a cron scheduler. * `ONE_TIME`:
259
- One time triggers that fire a single run. This occurs you triggered a single run on demand
260
- through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a
261
- previously failed run. This occurs when you request to re-run the job in case of failures. *
262
- `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
263
- Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
264
- triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
265
- restart a continuous job run."""
266
250
 
267
251
  trigger_info: Optional[TriggerInfo] = None
268
- """Additional details about what triggered the run"""
269
252
 
270
253
  def as_dict(self) -> dict:
271
254
  """Serializes the BaseRun into a dictionary suitable for use as a JSON request body."""
@@ -1004,7 +987,6 @@ class CreateJob:
1004
987
  are used, `git_source` must be defined on the job."""
1005
988
 
1006
989
  health: Optional[JobsHealthRules] = None
1007
- """An optional set of health rules that can be defined for this job."""
1008
990
 
1009
991
  job_clusters: Optional[List[JobCluster]] = None
1010
992
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
@@ -1043,10 +1025,6 @@ class CreateJob:
1043
1025
  """The queue settings of the job."""
1044
1026
 
1045
1027
  run_as: Optional[JobRunAs] = None
1046
- """Write-only setting. Specifies the user or service principal that the job runs as. If not
1047
- specified, the job runs as the user who created the job.
1048
-
1049
- Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown."""
1050
1028
 
1051
1029
  schedule: Optional[CronSchedule] = None
1052
1030
  """An optional periodic schedule for this job. The default behavior is that the job only runs when
@@ -1399,7 +1377,8 @@ class DashboardTaskOutput:
1399
1377
 
1400
1378
  @dataclass
1401
1379
  class DbtCloudJobRunStep:
1402
- """Format of response retrieved from dbt Cloud, for inclusion in output"""
1380
+ """Format of response retrieved from dbt Cloud, for inclusion in output Deprecated in favor of
1381
+ DbtPlatformJobRunStep"""
1403
1382
 
1404
1383
  index: Optional[int] = None
1405
1384
  """Orders the steps in the job"""
@@ -1410,7 +1389,7 @@ class DbtCloudJobRunStep:
1410
1389
  name: Optional[str] = None
1411
1390
  """Name of the step in the job"""
1412
1391
 
1413
- status: Optional[DbtCloudRunStatus] = None
1392
+ status: Optional[DbtPlatformRunStatus] = None
1414
1393
  """State of the step"""
1415
1394
 
1416
1395
  def as_dict(self) -> dict:
@@ -1446,23 +1425,14 @@ class DbtCloudJobRunStep:
1446
1425
  index=d.get("index", None),
1447
1426
  logs=d.get("logs", None),
1448
1427
  name=d.get("name", None),
1449
- status=_enum(d, "status", DbtCloudRunStatus),
1428
+ status=_enum(d, "status", DbtPlatformRunStatus),
1450
1429
  )
1451
1430
 
1452
1431
 
1453
- class DbtCloudRunStatus(Enum):
1454
- """Response enumeration from calling the dbt Cloud API, for inclusion in output"""
1455
-
1456
- CANCELLED = "CANCELLED"
1457
- ERROR = "ERROR"
1458
- QUEUED = "QUEUED"
1459
- RUNNING = "RUNNING"
1460
- STARTING = "STARTING"
1461
- SUCCESS = "SUCCESS"
1462
-
1463
-
1464
1432
  @dataclass
1465
1433
  class DbtCloudTask:
1434
+ """Deprecated in favor of DbtPlatformTask"""
1435
+
1466
1436
  connection_resource_name: Optional[str] = None
1467
1437
  """The resource name of the UC connection that authenticates the dbt Cloud for this task"""
1468
1438
 
@@ -1498,6 +1468,8 @@ class DbtCloudTask:
1498
1468
 
1499
1469
  @dataclass
1500
1470
  class DbtCloudTaskOutput:
1471
+ """Deprecated in favor of DbtPlatformTaskOutput"""
1472
+
1501
1473
  dbt_cloud_job_run_id: Optional[int] = None
1502
1474
  """Id of the job run in dbt Cloud"""
1503
1475
 
@@ -1572,6 +1544,176 @@ class DbtOutput:
1572
1544
  return cls(artifacts_headers=d.get("artifacts_headers", None), artifacts_link=d.get("artifacts_link", None))
1573
1545
 
1574
1546
 
1547
+ @dataclass
1548
+ class DbtPlatformJobRunStep:
1549
+ """Format of response retrieved from dbt platform, for inclusion in output"""
1550
+
1551
+ index: Optional[int] = None
1552
+ """Orders the steps in the job"""
1553
+
1554
+ logs: Optional[str] = None
1555
+ """Output of the step"""
1556
+
1557
+ logs_truncated: Optional[bool] = None
1558
+ """Whether the logs of this step have been truncated. If true, the logs has been truncated to 10000
1559
+ characters."""
1560
+
1561
+ name: Optional[str] = None
1562
+ """Name of the step in the job"""
1563
+
1564
+ name_truncated: Optional[bool] = None
1565
+ """Whether the name of the job has been truncated. If true, the name has been truncated to 100
1566
+ characters."""
1567
+
1568
+ status: Optional[DbtPlatformRunStatus] = None
1569
+ """State of the step"""
1570
+
1571
+ def as_dict(self) -> dict:
1572
+ """Serializes the DbtPlatformJobRunStep into a dictionary suitable for use as a JSON request body."""
1573
+ body = {}
1574
+ if self.index is not None:
1575
+ body["index"] = self.index
1576
+ if self.logs is not None:
1577
+ body["logs"] = self.logs
1578
+ if self.logs_truncated is not None:
1579
+ body["logs_truncated"] = self.logs_truncated
1580
+ if self.name is not None:
1581
+ body["name"] = self.name
1582
+ if self.name_truncated is not None:
1583
+ body["name_truncated"] = self.name_truncated
1584
+ if self.status is not None:
1585
+ body["status"] = self.status.value
1586
+ return body
1587
+
1588
+ def as_shallow_dict(self) -> dict:
1589
+ """Serializes the DbtPlatformJobRunStep into a shallow dictionary of its immediate attributes."""
1590
+ body = {}
1591
+ if self.index is not None:
1592
+ body["index"] = self.index
1593
+ if self.logs is not None:
1594
+ body["logs"] = self.logs
1595
+ if self.logs_truncated is not None:
1596
+ body["logs_truncated"] = self.logs_truncated
1597
+ if self.name is not None:
1598
+ body["name"] = self.name
1599
+ if self.name_truncated is not None:
1600
+ body["name_truncated"] = self.name_truncated
1601
+ if self.status is not None:
1602
+ body["status"] = self.status
1603
+ return body
1604
+
1605
+ @classmethod
1606
+ def from_dict(cls, d: Dict[str, Any]) -> DbtPlatformJobRunStep:
1607
+ """Deserializes the DbtPlatformJobRunStep from a dictionary."""
1608
+ return cls(
1609
+ index=d.get("index", None),
1610
+ logs=d.get("logs", None),
1611
+ logs_truncated=d.get("logs_truncated", None),
1612
+ name=d.get("name", None),
1613
+ name_truncated=d.get("name_truncated", None),
1614
+ status=_enum(d, "status", DbtPlatformRunStatus),
1615
+ )
1616
+
1617
+
1618
+ class DbtPlatformRunStatus(Enum):
1619
+ """Response enumeration from calling the dbt platform API, for inclusion in output"""
1620
+
1621
+ CANCELLED = "CANCELLED"
1622
+ ERROR = "ERROR"
1623
+ QUEUED = "QUEUED"
1624
+ RUNNING = "RUNNING"
1625
+ STARTING = "STARTING"
1626
+ SUCCESS = "SUCCESS"
1627
+
1628
+
1629
+ @dataclass
1630
+ class DbtPlatformTask:
1631
+ connection_resource_name: Optional[str] = None
1632
+ """The resource name of the UC connection that authenticates the dbt platform for this task"""
1633
+
1634
+ dbt_platform_job_id: Optional[str] = None
1635
+ """Id of the dbt platform job to be triggered. Specified as a string for maximum compatibility with
1636
+ clients."""
1637
+
1638
+ def as_dict(self) -> dict:
1639
+ """Serializes the DbtPlatformTask into a dictionary suitable for use as a JSON request body."""
1640
+ body = {}
1641
+ if self.connection_resource_name is not None:
1642
+ body["connection_resource_name"] = self.connection_resource_name
1643
+ if self.dbt_platform_job_id is not None:
1644
+ body["dbt_platform_job_id"] = self.dbt_platform_job_id
1645
+ return body
1646
+
1647
+ def as_shallow_dict(self) -> dict:
1648
+ """Serializes the DbtPlatformTask into a shallow dictionary of its immediate attributes."""
1649
+ body = {}
1650
+ if self.connection_resource_name is not None:
1651
+ body["connection_resource_name"] = self.connection_resource_name
1652
+ if self.dbt_platform_job_id is not None:
1653
+ body["dbt_platform_job_id"] = self.dbt_platform_job_id
1654
+ return body
1655
+
1656
+ @classmethod
1657
+ def from_dict(cls, d: Dict[str, Any]) -> DbtPlatformTask:
1658
+ """Deserializes the DbtPlatformTask from a dictionary."""
1659
+ return cls(
1660
+ connection_resource_name=d.get("connection_resource_name", None),
1661
+ dbt_platform_job_id=d.get("dbt_platform_job_id", None),
1662
+ )
1663
+
1664
+
1665
+ @dataclass
1666
+ class DbtPlatformTaskOutput:
1667
+ dbt_platform_job_run_id: Optional[str] = None
1668
+ """Id of the job run in dbt platform. Specified as a string for maximum compatibility with clients."""
1669
+
1670
+ dbt_platform_job_run_output: Optional[List[DbtPlatformJobRunStep]] = None
1671
+ """Steps of the job run as received from dbt platform"""
1672
+
1673
+ dbt_platform_job_run_url: Optional[str] = None
1674
+ """Url where full run details can be viewed"""
1675
+
1676
+ steps_truncated: Optional[bool] = None
1677
+ """Whether the number of steps in the output has been truncated. If true, the output will contain
1678
+ the first 20 steps of the output."""
1679
+
1680
+ def as_dict(self) -> dict:
1681
+ """Serializes the DbtPlatformTaskOutput into a dictionary suitable for use as a JSON request body."""
1682
+ body = {}
1683
+ if self.dbt_platform_job_run_id is not None:
1684
+ body["dbt_platform_job_run_id"] = self.dbt_platform_job_run_id
1685
+ if self.dbt_platform_job_run_output:
1686
+ body["dbt_platform_job_run_output"] = [v.as_dict() for v in self.dbt_platform_job_run_output]
1687
+ if self.dbt_platform_job_run_url is not None:
1688
+ body["dbt_platform_job_run_url"] = self.dbt_platform_job_run_url
1689
+ if self.steps_truncated is not None:
1690
+ body["steps_truncated"] = self.steps_truncated
1691
+ return body
1692
+
1693
+ def as_shallow_dict(self) -> dict:
1694
+ """Serializes the DbtPlatformTaskOutput into a shallow dictionary of its immediate attributes."""
1695
+ body = {}
1696
+ if self.dbt_platform_job_run_id is not None:
1697
+ body["dbt_platform_job_run_id"] = self.dbt_platform_job_run_id
1698
+ if self.dbt_platform_job_run_output:
1699
+ body["dbt_platform_job_run_output"] = self.dbt_platform_job_run_output
1700
+ if self.dbt_platform_job_run_url is not None:
1701
+ body["dbt_platform_job_run_url"] = self.dbt_platform_job_run_url
1702
+ if self.steps_truncated is not None:
1703
+ body["steps_truncated"] = self.steps_truncated
1704
+ return body
1705
+
1706
+ @classmethod
1707
+ def from_dict(cls, d: Dict[str, Any]) -> DbtPlatformTaskOutput:
1708
+ """Deserializes the DbtPlatformTaskOutput from a dictionary."""
1709
+ return cls(
1710
+ dbt_platform_job_run_id=d.get("dbt_platform_job_run_id", None),
1711
+ dbt_platform_job_run_output=_repeated_dict(d, "dbt_platform_job_run_output", DbtPlatformJobRunStep),
1712
+ dbt_platform_job_run_url=d.get("dbt_platform_job_run_url", None),
1713
+ steps_truncated=d.get("steps_truncated", None),
1714
+ )
1715
+
1716
+
1575
1717
  @dataclass
1576
1718
  class DbtTask:
1577
1719
  commands: List[str]
@@ -2393,8 +2535,6 @@ class GitSource:
2393
2535
  with git_branch or git_tag."""
2394
2536
 
2395
2537
  git_snapshot: Optional[GitSnapshot] = None
2396
- """Read-only state of the remote repository at the time the job was run. This field is only
2397
- included on job runs."""
2398
2538
 
2399
2539
  git_tag: Optional[str] = None
2400
2540
  """Name of the tag to be checked out and used by this job. This field cannot be specified in
@@ -2567,7 +2707,6 @@ class JobAccessControlRequest:
2567
2707
  """name of the group"""
2568
2708
 
2569
2709
  permission_level: Optional[JobPermissionLevel] = None
2570
- """Permission level"""
2571
2710
 
2572
2711
  service_principal_name: Optional[str] = None
2573
2712
  """application ID of a service principal"""
@@ -2890,9 +3029,6 @@ class JobEnvironment:
2890
3029
  """The key of an environment. It has to be unique within a job."""
2891
3030
 
2892
3031
  spec: Optional[compute.Environment] = None
2893
- """The environment entity used to preserve serverless environment side panel, jobs' environment for
2894
- non-notebook task, and DLT's environment for classic and serverless pipelines. In this minimal
2895
- environment spec, only pip dependencies are supported."""
2896
3032
 
2897
3033
  def as_dict(self) -> dict:
2898
3034
  """Serializes the JobEnvironment into a dictionary suitable for use as a JSON request body."""
@@ -3033,7 +3169,6 @@ class JobPermission:
3033
3169
  inherited_from_object: Optional[List[str]] = None
3034
3170
 
3035
3171
  permission_level: Optional[JobPermissionLevel] = None
3036
- """Permission level"""
3037
3172
 
3038
3173
  def as_dict(self) -> dict:
3039
3174
  """Serializes the JobPermission into a dictionary suitable for use as a JSON request body."""
@@ -3121,7 +3256,6 @@ class JobPermissionsDescription:
3121
3256
  description: Optional[str] = None
3122
3257
 
3123
3258
  permission_level: Optional[JobPermissionLevel] = None
3124
- """Permission level"""
3125
3259
 
3126
3260
  def as_dict(self) -> dict:
3127
3261
  """Serializes the JobPermissionsDescription into a dictionary suitable for use as a JSON request body."""
@@ -3271,7 +3405,6 @@ class JobSettings:
3271
3405
  are used, `git_source` must be defined on the job."""
3272
3406
 
3273
3407
  health: Optional[JobsHealthRules] = None
3274
- """An optional set of health rules that can be defined for this job."""
3275
3408
 
3276
3409
  job_clusters: Optional[List[JobCluster]] = None
3277
3410
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
@@ -3310,10 +3443,6 @@ class JobSettings:
3310
3443
  """The queue settings of the job."""
3311
3444
 
3312
3445
  run_as: Optional[JobRunAs] = None
3313
- """Write-only setting. Specifies the user or service principal that the job runs as. If not
3314
- specified, the job runs as the user who created the job.
3315
-
3316
- Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown."""
3317
3446
 
3318
3447
  schedule: Optional[CronSchedule] = None
3319
3448
  """An optional periodic schedule for this job. The default behavior is that the job only runs when
@@ -3572,18 +3701,8 @@ class JobsHealthOperator(Enum):
3572
3701
  @dataclass
3573
3702
  class JobsHealthRule:
3574
3703
  metric: JobsHealthMetric
3575
- """Specifies the health metric that is being evaluated for a particular health rule.
3576
-
3577
- * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * `STREAMING_BACKLOG_BYTES`:
3578
- An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric
3579
- is in Public Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
3580
- across all streams. This metric is in Public Preview. * `STREAMING_BACKLOG_SECONDS`: An estimate
3581
- of the maximum consumer delay across all streams. This metric is in Public Preview. *
3582
- `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all
3583
- streams. This metric is in Public Preview."""
3584
3704
 
3585
3705
  op: JobsHealthOperator
3586
- """Specifies the operator used to compare the health metric value with the specified threshold."""
3587
3706
 
3588
3707
  value: int
3589
3708
  """Specifies the threshold value that the health metric should obey to satisfy the health rule."""
@@ -4289,11 +4408,6 @@ class PythonWheelTask:
4289
4408
  @dataclass
4290
4409
  class QueueDetails:
4291
4410
  code: Optional[QueueDetailsCodeCode] = None
4292
- """The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run was queued due to
4293
- reaching the workspace limit of active task runs. * `MAX_CONCURRENT_RUNS_REACHED`: The run was
4294
- queued due to reaching the per-job limit of concurrent job runs. *
4295
- `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run was queued due to reaching the workspace limit of
4296
- active run job tasks."""
4297
4411
 
4298
4412
  message: Optional[str] = None
4299
4413
  """A descriptive message with the queuing details. This field is unstructured, and its exact format
@@ -4384,7 +4498,6 @@ class RepairHistoryItem:
4384
4498
  """Deprecated. Please use the `status` field instead."""
4385
4499
 
4386
4500
  status: Optional[RunStatus] = None
4387
- """The current status of the run"""
4388
4501
 
4389
4502
  task_run_ids: Optional[List[int]] = None
4390
4503
  """The run IDs of the task runs that ran as part of this repair history item."""
@@ -5128,11 +5241,6 @@ class Run:
5128
5241
  """The URL to the detail page of the run."""
5129
5242
 
5130
5243
  run_type: Optional[RunType] = None
5131
- """The type of a run. * `JOB_RUN`: Normal job run. A run created with :method:jobs/runNow. *
5132
- `WORKFLOW_RUN`: Workflow run. A run created with [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit
5133
- run. A run created with :method:jobs/submit.
5134
-
5135
- [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow"""
5136
5244
 
5137
5245
  schedule: Optional[CronSchedule] = None
5138
5246
  """The cron schedule that triggered this run if it was triggered by the periodic scheduler."""
@@ -5153,7 +5261,6 @@ class Run:
5153
5261
  """Deprecated. Please use the `status` field instead."""
5154
5262
 
5155
5263
  status: Optional[RunStatus] = None
5156
- """The current status of the run"""
5157
5264
 
5158
5265
  tasks: Optional[List[RunTask]] = None
5159
5266
  """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
@@ -5162,19 +5269,8 @@ class Run:
5162
5269
  root to determine if more results are available."""
5163
5270
 
5164
5271
  trigger: Optional[TriggerType] = None
5165
- """The type of trigger that fired this run.
5166
-
5167
- * `PERIODIC`: Schedules that periodically trigger runs, such as a cron scheduler. * `ONE_TIME`:
5168
- One time triggers that fire a single run. This occurs you triggered a single run on demand
5169
- through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a
5170
- previously failed run. This occurs when you request to re-run the job in case of failures. *
5171
- `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
5172
- Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
5173
- triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
5174
- restart a continuous job run."""
5175
5272
 
5176
5273
  trigger_info: Optional[TriggerInfo] = None
5177
- """Additional details about what triggered the run"""
5178
5274
 
5179
5275
  def as_dict(self) -> dict:
5180
5276
  """Serializes the Run into a dictionary suitable for use as a JSON request body."""
@@ -5955,10 +6051,13 @@ class RunOutput:
5955
6051
  """The output of a dashboard task, if available"""
5956
6052
 
5957
6053
  dbt_cloud_output: Optional[DbtCloudTaskOutput] = None
6054
+ """Deprecated in favor of the new dbt_platform_output"""
5958
6055
 
5959
6056
  dbt_output: Optional[DbtOutput] = None
5960
6057
  """The output of a dbt task, if available."""
5961
6058
 
6059
+ dbt_platform_output: Optional[DbtPlatformTaskOutput] = None
6060
+
5962
6061
  error: Optional[str] = None
5963
6062
  """An error message indicating why a task failed or why output is not available. The message is
5964
6063
  unstructured, and its exact format is subject to change."""
@@ -6008,6 +6107,8 @@ class RunOutput:
6008
6107
  body["dbt_cloud_output"] = self.dbt_cloud_output.as_dict()
6009
6108
  if self.dbt_output:
6010
6109
  body["dbt_output"] = self.dbt_output.as_dict()
6110
+ if self.dbt_platform_output:
6111
+ body["dbt_platform_output"] = self.dbt_platform_output.as_dict()
6011
6112
  if self.error is not None:
6012
6113
  body["error"] = self.error
6013
6114
  if self.error_trace is not None:
@@ -6039,6 +6140,8 @@ class RunOutput:
6039
6140
  body["dbt_cloud_output"] = self.dbt_cloud_output
6040
6141
  if self.dbt_output:
6041
6142
  body["dbt_output"] = self.dbt_output
6143
+ if self.dbt_platform_output:
6144
+ body["dbt_platform_output"] = self.dbt_platform_output
6042
6145
  if self.error is not None:
6043
6146
  body["error"] = self.error
6044
6147
  if self.error_trace is not None:
@@ -6069,6 +6172,7 @@ class RunOutput:
6069
6172
  dashboard_output=_from_dict(d, "dashboard_output", DashboardTaskOutput),
6070
6173
  dbt_cloud_output=_from_dict(d, "dbt_cloud_output", DbtCloudTaskOutput),
6071
6174
  dbt_output=_from_dict(d, "dbt_output", DbtOutput),
6175
+ dbt_platform_output=_from_dict(d, "dbt_platform_output", DbtPlatformTaskOutput),
6072
6176
  error=d.get("error", None),
6073
6177
  error_trace=d.get("error_trace", None),
6074
6178
  info=d.get("info", None),
@@ -6310,7 +6414,6 @@ class RunStatus:
6310
6414
  """If the run was queued, details about the reason for queuing the run."""
6311
6415
 
6312
6416
  state: Optional[RunLifecycleStateV2State] = None
6313
- """The current state of the run."""
6314
6417
 
6315
6418
  termination_details: Optional[TerminationDetails] = None
6316
6419
  """If the run is in a TERMINATING or TERMINATED state, details about the reason for terminating the
@@ -6388,7 +6491,9 @@ class RunTask:
6388
6491
  """The task refreshes a dashboard and sends a snapshot to subscribers."""
6389
6492
 
6390
6493
  dbt_cloud_task: Optional[DbtCloudTask] = None
6391
- """Task type for dbt cloud"""
6494
+ """Task type for dbt cloud, deprecated in favor of the new name dbt_platform_task"""
6495
+
6496
+ dbt_platform_task: Optional[DbtPlatformTask] = None
6392
6497
 
6393
6498
  dbt_task: Optional[DbtTask] = None
6394
6499
  """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
@@ -6545,7 +6650,6 @@ class RunTask:
6545
6650
  """Deprecated. Please use the `status` field instead."""
6546
6651
 
6547
6652
  status: Optional[RunStatus] = None
6548
- """The current status of the run"""
6549
6653
 
6550
6654
  timeout_seconds: Optional[int] = None
6551
6655
  """An optional timeout applied to each run of this job task. A value of `0` means no timeout."""
@@ -6572,6 +6676,8 @@ class RunTask:
6572
6676
  body["dashboard_task"] = self.dashboard_task.as_dict()
6573
6677
  if self.dbt_cloud_task:
6574
6678
  body["dbt_cloud_task"] = self.dbt_cloud_task.as_dict()
6679
+ if self.dbt_platform_task:
6680
+ body["dbt_platform_task"] = self.dbt_platform_task.as_dict()
6575
6681
  if self.dbt_task:
6576
6682
  body["dbt_task"] = self.dbt_task.as_dict()
6577
6683
  if self.depends_on:
@@ -6669,6 +6775,8 @@ class RunTask:
6669
6775
  body["dashboard_task"] = self.dashboard_task
6670
6776
  if self.dbt_cloud_task:
6671
6777
  body["dbt_cloud_task"] = self.dbt_cloud_task
6778
+ if self.dbt_platform_task:
6779
+ body["dbt_platform_task"] = self.dbt_platform_task
6672
6780
  if self.dbt_task:
6673
6781
  body["dbt_task"] = self.dbt_task
6674
6782
  if self.depends_on:
@@ -6760,6 +6868,7 @@ class RunTask:
6760
6868
  condition_task=_from_dict(d, "condition_task", RunConditionTask),
6761
6869
  dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
6762
6870
  dbt_cloud_task=_from_dict(d, "dbt_cloud_task", DbtCloudTask),
6871
+ dbt_platform_task=_from_dict(d, "dbt_platform_task", DbtPlatformTask),
6763
6872
  dbt_task=_from_dict(d, "dbt_task", DbtTask),
6764
6873
  depends_on=_repeated_dict(d, "depends_on", TaskDependency),
6765
6874
  description=d.get("description", None),
@@ -6975,10 +7084,6 @@ class SparkSubmitTask:
6975
7084
  @dataclass
6976
7085
  class SqlAlertOutput:
6977
7086
  alert_state: Optional[SqlAlertState] = None
6978
- """The state of the SQL alert.
6979
-
6980
- * UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not fulfill trigger
6981
- conditions * TRIGGERED: alert evaluated and fulfilled trigger conditions"""
6982
7087
 
6983
7088
  output_link: Optional[str] = None
6984
7089
  """The link to find the output results."""
@@ -7611,7 +7716,6 @@ class SubmitRun:
7611
7716
  are used, `git_source` must be defined on the job."""
7612
7717
 
7613
7718
  health: Optional[JobsHealthRules] = None
7614
- """An optional set of health rules that can be defined for this job."""
7615
7719
 
7616
7720
  idempotency_token: Optional[str] = None
7617
7721
  """An optional token that can be used to guarantee the idempotency of job run requests. If a run
@@ -7784,7 +7888,9 @@ class SubmitTask:
7784
7888
  """The task refreshes a dashboard and sends a snapshot to subscribers."""
7785
7889
 
7786
7890
  dbt_cloud_task: Optional[DbtCloudTask] = None
7787
- """Task type for dbt cloud"""
7891
+ """Task type for dbt cloud, deprecated in favor of the new name dbt_platform_task"""
7892
+
7893
+ dbt_platform_task: Optional[DbtPlatformTask] = None
7788
7894
 
7789
7895
  dbt_task: Optional[DbtTask] = None
7790
7896
  """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
@@ -7818,7 +7924,6 @@ class SubmitTask:
7818
7924
  gen_ai_compute_task: Optional[GenAiComputeTask] = None
7819
7925
 
7820
7926
  health: Optional[JobsHealthRules] = None
7821
- """An optional set of health rules that can be defined for this job."""
7822
7927
 
7823
7928
  libraries: Optional[List[compute.Library]] = None
7824
7929
  """An optional list of libraries to be installed on the cluster. The default value is an empty
@@ -7898,6 +8003,8 @@ class SubmitTask:
7898
8003
  body["dashboard_task"] = self.dashboard_task.as_dict()
7899
8004
  if self.dbt_cloud_task:
7900
8005
  body["dbt_cloud_task"] = self.dbt_cloud_task.as_dict()
8006
+ if self.dbt_platform_task:
8007
+ body["dbt_platform_task"] = self.dbt_platform_task.as_dict()
7901
8008
  if self.dbt_task:
7902
8009
  body["dbt_task"] = self.dbt_task.as_dict()
7903
8010
  if self.depends_on:
@@ -7961,6 +8068,8 @@ class SubmitTask:
7961
8068
  body["dashboard_task"] = self.dashboard_task
7962
8069
  if self.dbt_cloud_task:
7963
8070
  body["dbt_cloud_task"] = self.dbt_cloud_task
8071
+ if self.dbt_platform_task:
8072
+ body["dbt_platform_task"] = self.dbt_platform_task
7964
8073
  if self.dbt_task:
7965
8074
  body["dbt_task"] = self.dbt_task
7966
8075
  if self.depends_on:
@@ -8021,6 +8130,7 @@ class SubmitTask:
8021
8130
  condition_task=_from_dict(d, "condition_task", ConditionTask),
8022
8131
  dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
8023
8132
  dbt_cloud_task=_from_dict(d, "dbt_cloud_task", DbtCloudTask),
8133
+ dbt_platform_task=_from_dict(d, "dbt_platform_task", DbtPlatformTask),
8024
8134
  dbt_task=_from_dict(d, "dbt_task", DbtTask),
8025
8135
  depends_on=_repeated_dict(d, "depends_on", TaskDependency),
8026
8136
  description=d.get("description", None),
@@ -8202,7 +8312,9 @@ class Task:
8202
8312
  """The task refreshes a dashboard and sends a snapshot to subscribers."""
8203
8313
 
8204
8314
  dbt_cloud_task: Optional[DbtCloudTask] = None
8205
- """Task type for dbt cloud"""
8315
+ """Task type for dbt cloud, deprecated in favor of the new name dbt_platform_task"""
8316
+
8317
+ dbt_platform_task: Optional[DbtPlatformTask] = None
8206
8318
 
8207
8319
  dbt_task: Optional[DbtTask] = None
8208
8320
  """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
@@ -8240,7 +8352,6 @@ class Task:
8240
8352
  gen_ai_compute_task: Optional[GenAiComputeTask] = None
8241
8353
 
8242
8354
  health: Optional[JobsHealthRules] = None
8243
- """An optional set of health rules that can be defined for this job."""
8244
8355
 
8245
8356
  job_cluster_key: Optional[str] = None
8246
8357
  """If job_cluster_key, this task is executed reusing the cluster specified in
@@ -8341,6 +8452,8 @@ class Task:
8341
8452
  body["dashboard_task"] = self.dashboard_task.as_dict()
8342
8453
  if self.dbt_cloud_task:
8343
8454
  body["dbt_cloud_task"] = self.dbt_cloud_task.as_dict()
8455
+ if self.dbt_platform_task:
8456
+ body["dbt_platform_task"] = self.dbt_platform_task.as_dict()
8344
8457
  if self.dbt_task:
8345
8458
  body["dbt_task"] = self.dbt_task.as_dict()
8346
8459
  if self.depends_on:
@@ -8414,6 +8527,8 @@ class Task:
8414
8527
  body["dashboard_task"] = self.dashboard_task
8415
8528
  if self.dbt_cloud_task:
8416
8529
  body["dbt_cloud_task"] = self.dbt_cloud_task
8530
+ if self.dbt_platform_task:
8531
+ body["dbt_platform_task"] = self.dbt_platform_task
8417
8532
  if self.dbt_task:
8418
8533
  body["dbt_task"] = self.dbt_task
8419
8534
  if self.depends_on:
@@ -8484,6 +8599,7 @@ class Task:
8484
8599
  condition_task=_from_dict(d, "condition_task", ConditionTask),
8485
8600
  dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
8486
8601
  dbt_cloud_task=_from_dict(d, "dbt_cloud_task", DbtCloudTask),
8602
+ dbt_platform_task=_from_dict(d, "dbt_platform_task", DbtPlatformTask),
8487
8603
  dbt_task=_from_dict(d, "dbt_task", DbtTask),
8488
8604
  depends_on=_repeated_dict(d, "depends_on", TaskDependency),
8489
8605
  description=d.get("description", None),
@@ -8747,55 +8863,12 @@ class TerminationCodeCode(Enum):
8747
8863
  @dataclass
8748
8864
  class TerminationDetails:
8749
8865
  code: Optional[TerminationCodeCode] = None
8750
- """The code indicates why the run was terminated. Additional codes might be introduced in future
8751
- releases. * `SUCCESS`: The run was completed successfully. * `SUCCESS_WITH_FAILURES`: The run
8752
- was completed successfully but some child runs failed. * `USER_CANCELED`: The run was
8753
- successfully canceled during execution by a user. * `CANCELED`: The run was canceled during
8754
- execution by the Databricks platform; for example, if the maximum run duration was exceeded. *
8755
- `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency
8756
- type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The
8757
- run encountered an unexpected error. Refer to the state message for further details. *
8758
- `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. *
8759
- `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further
8760
- details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when
8761
- communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because
8762
- it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The
8763
- workspace has reached the quota for the maximum number of concurrent active runs. Consider
8764
- scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it
8765
- tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The
8766
- number of cluster creation, start, and upsize requests have exceeded the allotted rate limit.
8767
- Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run
8768
- failed due to an error when accessing the customer blob storage. Refer to the state message for
8769
- further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more
8770
- details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to
8771
- a permission issue while accessing a resource. Refer to the state message for further details. *
8772
- `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer
8773
- to the state message for further details. The causes might include, but are not limited to: The
8774
- provided library is invalid, there are insufficient permissions to install the library, and so
8775
- forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum
8776
- concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a
8777
- cluster that has already reached the maximum number of contexts it is configured to create. See:
8778
- [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to
8779
- the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an
8780
- invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The
8781
- run failed due to a cloud provider issue. Refer to the state message for further details. *
8782
- `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size
8783
- limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user.
8784
-
8785
- [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now"""
8786
8866
 
8787
8867
  message: Optional[str] = None
8788
8868
  """A descriptive message with the termination details. This field is unstructured and the format
8789
8869
  might change."""
8790
8870
 
8791
8871
  type: Optional[TerminationTypeType] = None
8792
- """* `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An error occurred in the
8793
- Databricks platform. Please look at the [status page] or contact support if the issue persists.
8794
- * `CLIENT_ERROR`: The run was terminated because of an error caused by user input or the job
8795
- configuration. * `CLOUD_FAILURE`: The run was terminated because of an issue with your cloud
8796
- provider.
8797
-
8798
- [status page]: https://status.databricks.com/"""
8799
8872
 
8800
8873
  def as_dict(self) -> dict:
8801
8874
  """Serializes the TerminationDetails into a dictionary suitable for use as a JSON request body."""
@@ -8961,10 +9034,13 @@ class TriggerType(Enum):
8961
9034
  through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a
8962
9035
  previously failed run. This occurs when you request to re-run the job in case of failures. *
8963
9036
  `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
8964
- Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
8965
- triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
8966
- restart a continuous job run."""
9037
+ Indicates a run that is triggered by a file arrival. * `CONTINUOUS`: Indicates a run that is
9038
+ triggered by a continuous job. * `TABLE`: Indicates a run that is triggered by a table update. *
9039
+ `CONTINUOUS_RESTART`: Indicates a run created by user to manually restart a continuous job run.
9040
+ * `MODEL`: Indicates a run that is triggered by a model update."""
8967
9041
 
9042
+ CONTINUOUS = "CONTINUOUS"
9043
+ CONTINUOUS_RESTART = "CONTINUOUS_RESTART"
8968
9044
  FILE_ARRIVAL = "FILE_ARRIVAL"
8969
9045
  ONE_TIME = "ONE_TIME"
8970
9046
  PERIODIC = "PERIODIC"
@@ -9274,9 +9350,7 @@ class JobsAPI:
9274
9350
  raise TimeoutError(f"timed out after {timeout}: {status_message}")
9275
9351
 
9276
9352
  def cancel_all_runs(self, *, all_queued_runs: Optional[bool] = None, job_id: Optional[int] = None):
9277
- """Cancel all runs of a job.
9278
-
9279
- Cancels all active runs of a job. The runs are canceled asynchronously, so it doesn't prevent new runs
9353
+ """Cancels all active runs of a job. The runs are canceled asynchronously, so it doesn't prevent new runs
9280
9354
  from being started.
9281
9355
 
9282
9356
  :param all_queued_runs: bool (optional)
@@ -9299,9 +9373,7 @@ class JobsAPI:
9299
9373
  self._api.do("POST", "/api/2.2/jobs/runs/cancel-all", body=body, headers=headers)
9300
9374
 
9301
9375
  def cancel_run(self, run_id: int) -> Wait[Run]:
9302
- """Cancel a run.
9303
-
9304
- Cancels a job run or a task run. The run is canceled asynchronously, so it may still be running when
9376
+ """Cancels a job run or a task run. The run is canceled asynchronously, so it may still be running when
9305
9377
  this request completes.
9306
9378
 
9307
9379
  :param run_id: int
@@ -9359,8 +9431,6 @@ class JobsAPI:
9359
9431
  ) -> CreateResponse:
9360
9432
  """Create a new job.
9361
9433
 
9362
- Create a new job.
9363
-
9364
9434
  :param access_control_list: List[:class:`JobAccessControlRequest`] (optional)
9365
9435
  List of permissions to set on the job.
9366
9436
  :param budget_policy_id: str (optional)
@@ -9400,7 +9470,6 @@ class JobsAPI:
9400
9470
  Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are
9401
9471
  used, `git_source` must be defined on the job.
9402
9472
  :param health: :class:`JobsHealthRules` (optional)
9403
- An optional set of health rules that can be defined for this job.
9404
9473
  :param job_clusters: List[:class:`JobCluster`] (optional)
9405
9474
  A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries
9406
9475
  cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
@@ -9430,10 +9499,6 @@ class JobsAPI:
9430
9499
  :param queue: :class:`QueueSettings` (optional)
9431
9500
  The queue settings of the job.
9432
9501
  :param run_as: :class:`JobRunAs` (optional)
9433
- Write-only setting. Specifies the user or service principal that the job runs as. If not specified,
9434
- the job runs as the user who created the job.
9435
-
9436
- Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown.
9437
9502
  :param schedule: :class:`CronSchedule` (optional)
9438
9503
  An optional periodic schedule for this job. The default behavior is that the job only runs when
9439
9504
  triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.
@@ -9518,9 +9583,7 @@ class JobsAPI:
9518
9583
  return CreateResponse.from_dict(res)
9519
9584
 
9520
9585
  def delete(self, job_id: int):
9521
- """Delete a job.
9522
-
9523
- Deletes a job.
9586
+ """Deletes a job.
9524
9587
 
9525
9588
  :param job_id: int
9526
9589
  The canonical identifier of the job to delete. This field is required.
@@ -9537,9 +9600,7 @@ class JobsAPI:
9537
9600
  self._api.do("POST", "/api/2.2/jobs/delete", body=body, headers=headers)
9538
9601
 
9539
9602
  def delete_run(self, run_id: int):
9540
- """Delete a job run.
9541
-
9542
- Deletes a non-active run. Returns an error if the run is active.
9603
+ """Deletes a non-active run. Returns an error if the run is active.
9543
9604
 
9544
9605
  :param run_id: int
9545
9606
  ID of the run to delete.
@@ -9556,9 +9617,7 @@ class JobsAPI:
9556
9617
  self._api.do("POST", "/api/2.2/jobs/runs/delete", body=body, headers=headers)
9557
9618
 
9558
9619
  def export_run(self, run_id: int, *, views_to_export: Optional[ViewsToExport] = None) -> ExportRunOutput:
9559
- """Export and retrieve a job run.
9560
-
9561
- Export and retrieve the job run task.
9620
+ """Export and retrieve the job run task.
9562
9621
 
9563
9622
  :param run_id: int
9564
9623
  The canonical identifier for the run. This field is required.
@@ -9581,9 +9640,7 @@ class JobsAPI:
9581
9640
  return ExportRunOutput.from_dict(res)
9582
9641
 
9583
9642
  def get(self, job_id: int, *, page_token: Optional[str] = None) -> Job:
9584
- """Get a single job.
9585
-
9586
- Retrieves the details for a single job.
9643
+ """Retrieves the details for a single job.
9587
9644
 
9588
9645
  Large arrays in the results will be paginated when they exceed 100 elements. A request for a single
9589
9646
  job will return all properties for that job, and the first 100 elements of array properties (`tasks`,
@@ -9614,9 +9671,7 @@ class JobsAPI:
9614
9671
  return Job.from_dict(res)
9615
9672
 
9616
9673
  def get_permission_levels(self, job_id: str) -> GetJobPermissionLevelsResponse:
9617
- """Get job permission levels.
9618
-
9619
- Gets the permission levels that a user can have on an object.
9674
+ """Gets the permission levels that a user can have on an object.
9620
9675
 
9621
9676
  :param job_id: str
9622
9677
  The job for which to get or manage permissions.
@@ -9632,9 +9687,7 @@ class JobsAPI:
9632
9687
  return GetJobPermissionLevelsResponse.from_dict(res)
9633
9688
 
9634
9689
  def get_permissions(self, job_id: str) -> JobPermissions:
9635
- """Get job permissions.
9636
-
9637
- Gets the permissions of a job. Jobs can inherit permissions from their root object.
9690
+ """Gets the permissions of a job. Jobs can inherit permissions from their root object.
9638
9691
 
9639
9692
  :param job_id: str
9640
9693
  The job for which to get or manage permissions.
@@ -9657,9 +9710,7 @@ class JobsAPI:
9657
9710
  include_resolved_values: Optional[bool] = None,
9658
9711
  page_token: Optional[str] = None,
9659
9712
  ) -> Run:
9660
- """Get a single job run.
9661
-
9662
- Retrieves the metadata of a run.
9713
+ """Retrieves the metadata of a run.
9663
9714
 
9664
9715
  Large arrays in the results will be paginated when they exceed 100 elements. A request for a single
9665
9716
  run will return all properties for that run, and the first 100 elements of array properties (`tasks`,
@@ -9698,9 +9749,7 @@ class JobsAPI:
9698
9749
  return Run.from_dict(res)
9699
9750
 
9700
9751
  def get_run_output(self, run_id: int) -> RunOutput:
9701
- """Get the output for a single run.
9702
-
9703
- Retrieve the output and metadata of a single task run. When a notebook task returns a value through
9752
+ """Retrieve the output and metadata of a single task run. When a notebook task returns a value through
9704
9753
  the `dbutils.notebook.exit()` call, you can use this endpoint to retrieve that value. Databricks
9705
9754
  restricts this API to returning the first 5 MB of the output. To return a larger result, you can store
9706
9755
  job results in a cloud storage service.
@@ -9734,9 +9783,7 @@ class JobsAPI:
9734
9783
  offset: Optional[int] = None,
9735
9784
  page_token: Optional[str] = None,
9736
9785
  ) -> Iterator[BaseJob]:
9737
- """List jobs.
9738
-
9739
- Retrieves a list of jobs.
9786
+ """Retrieves a list of jobs.
9740
9787
 
9741
9788
  :param expand_tasks: bool (optional)
9742
9789
  Whether to include task and cluster details in the response. Note that only the first 100 elements
@@ -9794,9 +9841,7 @@ class JobsAPI:
9794
9841
  start_time_from: Optional[int] = None,
9795
9842
  start_time_to: Optional[int] = None,
9796
9843
  ) -> Iterator[BaseRun]:
9797
- """List job runs.
9798
-
9799
- List runs in descending order by start time.
9844
+ """List runs in descending order by start time.
9800
9845
 
9801
9846
  :param active_only: bool (optional)
9802
9847
  If active_only is `true`, only active runs are included in the results; otherwise, lists both active
@@ -9884,9 +9929,7 @@ class JobsAPI:
9884
9929
  spark_submit_params: Optional[List[str]] = None,
9885
9930
  sql_params: Optional[Dict[str, str]] = None,
9886
9931
  ) -> Wait[Run]:
9887
- """Repair a job run.
9888
-
9889
- Re-run one or more tasks. Tasks are re-run as part of the original job run. They use the current job
9932
+ """Re-run one or more tasks. Tasks are re-run as part of the original job run. They use the current job
9890
9933
  and task settings, and can be viewed in the history for the original job run.
9891
9934
 
9892
9935
  :param run_id: int
@@ -10064,9 +10107,7 @@ class JobsAPI:
10064
10107
  ).result(timeout=timeout)
10065
10108
 
10066
10109
  def reset(self, job_id: int, new_settings: JobSettings):
10067
- """Update all job settings (reset).
10068
-
10069
- Overwrite all settings for the given job. Use the [_Update_ endpoint](:method:jobs/update) to update
10110
+ """Overwrite all settings for the given job. Use the [_Update_ endpoint](:method:jobs/update) to update
10070
10111
  job settings partially.
10071
10112
 
10072
10113
  :param job_id: int
@@ -10108,9 +10149,7 @@ class JobsAPI:
10108
10149
  spark_submit_params: Optional[List[str]] = None,
10109
10150
  sql_params: Optional[Dict[str, str]] = None,
10110
10151
  ) -> Wait[Run]:
10111
- """Trigger a new job run.
10112
-
10113
- Run a job and return the `run_id` of the triggered run.
10152
+ """Run a job and return the `run_id` of the triggered run.
10114
10153
 
10115
10154
  :param job_id: int
10116
10155
  The ID of the job to be executed
@@ -10293,9 +10332,7 @@ class JobsAPI:
10293
10332
  def set_permissions(
10294
10333
  self, job_id: str, *, access_control_list: Optional[List[JobAccessControlRequest]] = None
10295
10334
  ) -> JobPermissions:
10296
- """Set job permissions.
10297
-
10298
- Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct
10335
+ """Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct
10299
10336
  permissions if none are specified. Objects can inherit permissions from their root object.
10300
10337
 
10301
10338
  :param job_id: str
@@ -10333,9 +10370,7 @@ class JobsAPI:
10333
10370
  timeout_seconds: Optional[int] = None,
10334
10371
  webhook_notifications: Optional[WebhookNotifications] = None,
10335
10372
  ) -> Wait[Run]:
10336
- """Create and trigger a one-time run.
10337
-
10338
- Submit a one-time run. This endpoint allows you to submit a workload directly without creating a job.
10373
+ """Submit a one-time run. This endpoint allows you to submit a workload directly without creating a job.
10339
10374
  Runs submitted using this endpoint don’t display in the UI. Use the `jobs/runs/get` API to check the
10340
10375
  run state after the job is submitted.
10341
10376
 
@@ -10358,7 +10393,6 @@ class JobsAPI:
10358
10393
  Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are
10359
10394
  used, `git_source` must be defined on the job.
10360
10395
  :param health: :class:`JobsHealthRules` (optional)
10361
- An optional set of health rules that can be defined for this job.
10362
10396
  :param idempotency_token: str (optional)
10363
10397
  An optional token that can be used to guarantee the idempotency of job run requests. If a run with
10364
10398
  the provided token already exists, the request does not create a new run but returns the ID of the
@@ -10472,9 +10506,7 @@ class JobsAPI:
10472
10506
  def update(
10473
10507
  self, job_id: int, *, fields_to_remove: Optional[List[str]] = None, new_settings: Optional[JobSettings] = None
10474
10508
  ):
10475
- """Update job settings partially.
10476
-
10477
- Add, update, or remove specific settings of an existing job. Use the [_Reset_
10509
+ """Add, update, or remove specific settings of an existing job. Use the [_Reset_
10478
10510
  endpoint](:method:jobs/reset) to overwrite all job settings.
10479
10511
 
10480
10512
  :param job_id: int
@@ -10512,9 +10544,7 @@ class JobsAPI:
10512
10544
  def update_permissions(
10513
10545
  self, job_id: str, *, access_control_list: Optional[List[JobAccessControlRequest]] = None
10514
10546
  ) -> JobPermissions:
10515
- """Update job permissions.
10516
-
10517
- Updates the permissions on a job. Jobs can inherit permissions from their root object.
10547
+ """Updates the permissions on a job. Jobs can inherit permissions from their root object.
10518
10548
 
10519
10549
  :param job_id: str
10520
10550
  The job for which to get or manage permissions.
@@ -10552,9 +10582,7 @@ class PolicyComplianceForJobsAPI:
10552
10582
  def enforce_compliance(
10553
10583
  self, job_id: int, *, validate_only: Optional[bool] = None
10554
10584
  ) -> EnforcePolicyComplianceResponse:
10555
- """Enforce job policy compliance.
10556
-
10557
- Updates a job so the job clusters that are created when running the job (specified in `new_cluster`)
10585
+ """Updates a job so the job clusters that are created when running the job (specified in `new_cluster`)
10558
10586
  are compliant with the current versions of their respective cluster policies. All-purpose clusters
10559
10587
  used in the job will not be updated.
10560
10588
 
@@ -10579,9 +10607,7 @@ class PolicyComplianceForJobsAPI:
10579
10607
  return EnforcePolicyComplianceResponse.from_dict(res)
10580
10608
 
10581
10609
  def get_compliance(self, job_id: int) -> GetPolicyComplianceResponse:
10582
- """Get job policy compliance.
10583
-
10584
- Returns the policy compliance status of a job. Jobs could be out of compliance if a cluster policy
10610
+ """Returns the policy compliance status of a job. Jobs could be out of compliance if a cluster policy
10585
10611
  they use was updated after the job was last edited and some of its job clusters no longer comply with
10586
10612
  their updated policies.
10587
10613
 
@@ -10604,9 +10630,7 @@ class PolicyComplianceForJobsAPI:
10604
10630
  def list_compliance(
10605
10631
  self, policy_id: str, *, page_size: Optional[int] = None, page_token: Optional[str] = None
10606
10632
  ) -> Iterator[JobCompliance]:
10607
- """List job policy compliance.
10608
-
10609
- Returns the policy compliance status of all jobs that use a given policy. Jobs could be out of
10633
+ """Returns the policy compliance status of all jobs that use a given policy. Jobs could be out of
10610
10634
  compliance if a cluster policy they use was updated after the job was last edited and its job clusters
10611
10635
  no longer comply with the updated policy.
10612
10636