databricks-sdk 0.57.0__py3-none-any.whl → 0.59.0__py3-none-any.whl
This diff shows the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of databricks-sdk might be problematic.
- databricks/sdk/__init__.py +38 -9
- databricks/sdk/service/aibuilder.py +0 -163
- databricks/sdk/service/apps.py +53 -49
- databricks/sdk/service/billing.py +62 -223
- databricks/sdk/service/catalog.py +3052 -3707
- databricks/sdk/service/cleanrooms.py +5 -54
- databricks/sdk/service/compute.py +579 -2715
- databricks/sdk/service/dashboards.py +108 -317
- databricks/sdk/service/database.py +603 -122
- databricks/sdk/service/files.py +2 -218
- databricks/sdk/service/iam.py +19 -298
- databricks/sdk/service/jobs.py +77 -1263
- databricks/sdk/service/marketplace.py +3 -575
- databricks/sdk/service/ml.py +816 -2734
- databricks/sdk/service/oauth2.py +122 -238
- databricks/sdk/service/pipelines.py +133 -724
- databricks/sdk/service/provisioning.py +36 -757
- databricks/sdk/service/qualitymonitorv2.py +0 -18
- databricks/sdk/service/serving.py +37 -583
- databricks/sdk/service/settings.py +282 -1768
- databricks/sdk/service/sharing.py +6 -478
- databricks/sdk/service/sql.py +129 -1696
- databricks/sdk/service/vectorsearch.py +0 -410
- databricks/sdk/service/workspace.py +252 -727
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/RECORD +31 -31
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/WHEEL +0 -0
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/top_level.txt +0 -0
databricks/sdk/service/jobs.py
CHANGED
@@ -219,11 +219,6 @@ class BaseRun:
     """The URL to the detail page of the run."""

     run_type: Optional[RunType] = None
-    """The type of a run. * `JOB_RUN`: Normal job run. A run created with :method:jobs/runNow. *
-    `WORKFLOW_RUN`: Workflow run. A run created with [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit
-    run. A run created with :method:jobs/submit.
-
-    [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow"""

     schedule: Optional[CronSchedule] = None
     """The cron schedule that triggered this run if it was triggered by the periodic scheduler."""
@@ -244,7 +239,6 @@ class BaseRun:
     """Deprecated. Please use the `status` field instead."""

     status: Optional[RunStatus] = None
-    """The current status of the run"""

     tasks: Optional[List[RunTask]] = None
     """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
@@ -253,19 +247,8 @@ class BaseRun:
     root to determine if more results are available."""

     trigger: Optional[TriggerType] = None
-    """The type of trigger that fired this run.
-
-    * `PERIODIC`: Schedules that periodically trigger runs, such as a cron scheduler. * `ONE_TIME`:
-    One time triggers that fire a single run. This occurs you triggered a single run on demand
-    through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a
-    previously failed run. This occurs when you request to re-run the job in case of failures. *
-    `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
-    Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
-    triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
-    restart a continuous job run."""

     trigger_info: Optional[TriggerInfo] = None
-    """Additional details about what triggered the run"""

     def as_dict(self) -> dict:
         """Serializes the BaseRun into a dictionary suitable for use as a JSON request body."""
@@ -449,39 +432,6 @@ class BaseRun:
         )


-@dataclass
-class CancelAllRuns:
-    all_queued_runs: Optional[bool] = None
-    """Optional boolean parameter to cancel all queued runs. If no job_id is provided, all queued runs
-    in the workspace are canceled."""
-
-    job_id: Optional[int] = None
-    """The canonical identifier of the job to cancel all runs of."""
-
-    def as_dict(self) -> dict:
-        """Serializes the CancelAllRuns into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.all_queued_runs is not None:
-            body["all_queued_runs"] = self.all_queued_runs
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CancelAllRuns into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.all_queued_runs is not None:
-            body["all_queued_runs"] = self.all_queued_runs
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CancelAllRuns:
-        """Deserializes the CancelAllRuns from a dictionary."""
-        return cls(all_queued_runs=d.get("all_queued_runs", None), job_id=d.get("job_id", None))
-
-
 @dataclass
 class CancelAllRunsResponse:
     def as_dict(self) -> dict:
@@ -500,31 +450,6 @@ class CancelAllRunsResponse:
         return cls()


-@dataclass
-class CancelRun:
-    run_id: int
-    """This field is required."""
-
-    def as_dict(self) -> dict:
-        """Serializes the CancelRun into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.run_id is not None:
-            body["run_id"] = self.run_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CancelRun into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.run_id is not None:
-            body["run_id"] = self.run_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CancelRun:
-        """Deserializes the CancelRun from a dictionary."""
-        return cls(run_id=d.get("run_id", None))
-
-
 @dataclass
 class CancelRunResponse:
     def as_dict(self) -> dict:
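Note that `CancelAllRuns` and `CancelRun` were request-body dataclasses only; the corresponding `JobsAPI` methods already take the same fields as keyword arguments, so typical call sites are unaffected. A minimal sketch, assuming a `WorkspaceClient` configured from the environment and placeholder IDs:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Cancel all active runs of one job; passing all_queued_runs=True with no
# job_id would instead target every queued run in the workspace.
w.jobs.cancel_all_runs(job_id=123)

# Cancel a single run; the returned waiter blocks until the run reaches a
# terminal state.
w.jobs.cancel_run(run_id=456).result()
```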
@@ -952,271 +877,6 @@ class Continuous:
         return cls(pause_status=_enum(d, "pause_status", PauseStatus))


-@dataclass
-class CreateJob:
-    access_control_list: Optional[List[JobAccessControlRequest]] = None
-    """List of permissions to set on the job."""
-
-    budget_policy_id: Optional[str] = None
-    """The id of the user specified budget policy to use for this job. If not specified, a default
-    budget policy may be applied when creating or modifying the job. See
-    `effective_budget_policy_id` for the budget policy used by this workload."""
-
-    continuous: Optional[Continuous] = None
-    """An optional continuous property for this job. The continuous property will ensure that there is
-    always one run executing. Only one of `schedule` and `continuous` can be used."""
-
-    deployment: Optional[JobDeployment] = None
-    """Deployment information for jobs managed by external sources."""
-
-    description: Optional[str] = None
-    """An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding."""
-
-    edit_mode: Optional[JobEditMode] = None
-    """Edit mode of the job.
-
-    * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. * `EDITABLE`: The job is
-    in an editable state and can be modified."""
-
-    email_notifications: Optional[JobEmailNotifications] = None
-    """An optional set of email addresses that is notified when runs of this job begin or complete as
-    well as when this job is deleted."""
-
-    environments: Optional[List[JobEnvironment]] = None
-    """A list of task execution environment specifications that can be referenced by serverless tasks
-    of this job. An environment is required to be present for serverless tasks. For serverless
-    notebook tasks, the environment is accessible in the notebook environment panel. For other
-    serverless tasks, the task environment is required to be specified using environment_key in the
-    task settings."""
-
-    format: Optional[Format] = None
-    """Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls.
-    When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`."""
-
-    git_source: Optional[GitSource] = None
-    """An optional specification for a remote Git repository containing the source code used by tasks.
-    Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.
-
-    If `git_source` is set, these tasks retrieve the file from the remote repository by default.
-    However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.
-
-    Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks
-    are used, `git_source` must be defined on the job."""
-
-    health: Optional[JobsHealthRules] = None
-    """An optional set of health rules that can be defined for this job."""
-
-    job_clusters: Optional[List[JobCluster]] = None
-    """A list of job cluster specifications that can be shared and reused by tasks of this job.
-    Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
-    task settings."""
-
-    max_concurrent_runs: Optional[int] = None
-    """An optional maximum allowed number of concurrent runs of the job. Set this value if you want to
-    be able to execute multiple runs of the same job concurrently. This is useful for example if you
-    trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each
-    other, or if you want to trigger multiple runs which differ by their input parameters. This
-    setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4
-    concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.
-    However, from then on, new runs are skipped unless there are fewer than 3 active runs. This
-    value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped."""
-
-    name: Optional[str] = None
-    """An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding."""
-
-    notification_settings: Optional[JobNotificationSettings] = None
-    """Optional notification settings that are used when sending notifications to each of the
-    `email_notifications` and `webhook_notifications` for this job."""
-
-    parameters: Optional[List[JobParameterDefinition]] = None
-    """Job-level parameter definitions"""
-
-    performance_target: Optional[PerformanceTarget] = None
-    """The performance mode on a serverless job. This field determines the level of compute performance
-    or cost-efficiency for the run.
-
-    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
-    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
-    optimized cluster performance."""
-
-    queue: Optional[QueueSettings] = None
-    """The queue settings of the job."""
-
-    run_as: Optional[JobRunAs] = None
-    """Write-only setting. Specifies the user or service principal that the job runs as. If not
-    specified, the job runs as the user who created the job.
-
-    Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown."""
-
-    schedule: Optional[CronSchedule] = None
-    """An optional periodic schedule for this job. The default behavior is that the job only runs when
-    triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`."""
-
-    tags: Optional[Dict[str, str]] = None
-    """A map of tags associated with the job. These are forwarded to the cluster as cluster tags for
-    jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can
-    be added to the job."""
-
-    tasks: Optional[List[Task]] = None
-    """A list of task specifications to be executed by this job. It supports up to 1000 elements in
-    write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update,
-    :method:jobs/submit). Read endpoints return only 100 tasks. If more than 100 tasks are
-    available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field
-    at the object root to determine if more results are available."""
-
-    timeout_seconds: Optional[int] = None
-    """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
-
-    trigger: Optional[TriggerSettings] = None
-    """A configuration to trigger a run when certain conditions are met. The default behavior is that
-    the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API
-    request to `runNow`."""
-
-    webhook_notifications: Optional[WebhookNotifications] = None
-    """A collection of system notification IDs to notify when runs of this job begin or complete."""
-
-    def as_dict(self) -> dict:
-        """Serializes the CreateJob into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = [v.as_dict() for v in self.access_control_list]
-        if self.budget_policy_id is not None:
-            body["budget_policy_id"] = self.budget_policy_id
-        if self.continuous:
-            body["continuous"] = self.continuous.as_dict()
-        if self.deployment:
-            body["deployment"] = self.deployment.as_dict()
-        if self.description is not None:
-            body["description"] = self.description
-        if self.edit_mode is not None:
-            body["edit_mode"] = self.edit_mode.value
-        if self.email_notifications:
-            body["email_notifications"] = self.email_notifications.as_dict()
-        if self.environments:
-            body["environments"] = [v.as_dict() for v in self.environments]
-        if self.format is not None:
-            body["format"] = self.format.value
-        if self.git_source:
-            body["git_source"] = self.git_source.as_dict()
-        if self.health:
-            body["health"] = self.health.as_dict()
-        if self.job_clusters:
-            body["job_clusters"] = [v.as_dict() for v in self.job_clusters]
-        if self.max_concurrent_runs is not None:
-            body["max_concurrent_runs"] = self.max_concurrent_runs
-        if self.name is not None:
-            body["name"] = self.name
-        if self.notification_settings:
-            body["notification_settings"] = self.notification_settings.as_dict()
-        if self.parameters:
-            body["parameters"] = [v.as_dict() for v in self.parameters]
-        if self.performance_target is not None:
-            body["performance_target"] = self.performance_target.value
-        if self.queue:
-            body["queue"] = self.queue.as_dict()
-        if self.run_as:
-            body["run_as"] = self.run_as.as_dict()
-        if self.schedule:
-            body["schedule"] = self.schedule.as_dict()
-        if self.tags:
-            body["tags"] = self.tags
-        if self.tasks:
-            body["tasks"] = [v.as_dict() for v in self.tasks]
-        if self.timeout_seconds is not None:
-            body["timeout_seconds"] = self.timeout_seconds
-        if self.trigger:
-            body["trigger"] = self.trigger.as_dict()
-        if self.webhook_notifications:
-            body["webhook_notifications"] = self.webhook_notifications.as_dict()
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CreateJob into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = self.access_control_list
-        if self.budget_policy_id is not None:
-            body["budget_policy_id"] = self.budget_policy_id
-        if self.continuous:
-            body["continuous"] = self.continuous
-        if self.deployment:
-            body["deployment"] = self.deployment
-        if self.description is not None:
-            body["description"] = self.description
-        if self.edit_mode is not None:
-            body["edit_mode"] = self.edit_mode
-        if self.email_notifications:
-            body["email_notifications"] = self.email_notifications
-        if self.environments:
-            body["environments"] = self.environments
-        if self.format is not None:
-            body["format"] = self.format
-        if self.git_source:
-            body["git_source"] = self.git_source
-        if self.health:
-            body["health"] = self.health
-        if self.job_clusters:
-            body["job_clusters"] = self.job_clusters
-        if self.max_concurrent_runs is not None:
-            body["max_concurrent_runs"] = self.max_concurrent_runs
-        if self.name is not None:
-            body["name"] = self.name
-        if self.notification_settings:
-            body["notification_settings"] = self.notification_settings
-        if self.parameters:
-            body["parameters"] = self.parameters
-        if self.performance_target is not None:
-            body["performance_target"] = self.performance_target
-        if self.queue:
-            body["queue"] = self.queue
-        if self.run_as:
-            body["run_as"] = self.run_as
-        if self.schedule:
-            body["schedule"] = self.schedule
-        if self.tags:
-            body["tags"] = self.tags
-        if self.tasks:
-            body["tasks"] = self.tasks
-        if self.timeout_seconds is not None:
-            body["timeout_seconds"] = self.timeout_seconds
-        if self.trigger:
-            body["trigger"] = self.trigger
-        if self.webhook_notifications:
-            body["webhook_notifications"] = self.webhook_notifications
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CreateJob:
-        """Deserializes the CreateJob from a dictionary."""
-        return cls(
-            access_control_list=_repeated_dict(d, "access_control_list", JobAccessControlRequest),
-            budget_policy_id=d.get("budget_policy_id", None),
-            continuous=_from_dict(d, "continuous", Continuous),
-            deployment=_from_dict(d, "deployment", JobDeployment),
-            description=d.get("description", None),
-            edit_mode=_enum(d, "edit_mode", JobEditMode),
-            email_notifications=_from_dict(d, "email_notifications", JobEmailNotifications),
-            environments=_repeated_dict(d, "environments", JobEnvironment),
-            format=_enum(d, "format", Format),
-            git_source=_from_dict(d, "git_source", GitSource),
-            health=_from_dict(d, "health", JobsHealthRules),
-            job_clusters=_repeated_dict(d, "job_clusters", JobCluster),
-            max_concurrent_runs=d.get("max_concurrent_runs", None),
-            name=d.get("name", None),
-            notification_settings=_from_dict(d, "notification_settings", JobNotificationSettings),
-            parameters=_repeated_dict(d, "parameters", JobParameterDefinition),
-            performance_target=_enum(d, "performance_target", PerformanceTarget),
-            queue=_from_dict(d, "queue", QueueSettings),
-            run_as=_from_dict(d, "run_as", JobRunAs),
-            schedule=_from_dict(d, "schedule", CronSchedule),
-            tags=d.get("tags", None),
-            tasks=_repeated_dict(d, "tasks", Task),
-            timeout_seconds=d.get("timeout_seconds", None),
-            trigger=_from_dict(d, "trigger", TriggerSettings),
-            webhook_notifications=_from_dict(d, "webhook_notifications", WebhookNotifications),
-        )
-
-
 @dataclass
 class CreateResponse:
     """Job was created successfully"""
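`CreateJob` is the largest of the removed request types, but it too was never part of the method signature: `w.jobs.create` accepts the same fields as keyword arguments. A hedged sketch of an unaffected call site, with the job name and notebook path as placeholders:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service import jobs

w = WorkspaceClient()

created = w.jobs.create(
    name="nightly-etl",  # placeholder name
    max_concurrent_runs=1,
    tasks=[
        jobs.Task(
            task_key="main",
            notebook_task=jobs.NotebookTask(notebook_path="/Workspace/etl/main"),
        )
    ],
)
print(created.job_id)
```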
@@ -1822,32 +1482,7 @@ class DbtTask:
             schema=d.get("schema", None),
             source=_enum(d, "source", Source),
             warehouse_id=d.get("warehouse_id", None),
-        )
-
-
-@dataclass
-class DeleteJob:
-    job_id: int
-    """The canonical identifier of the job to delete. This field is required."""
-
-    def as_dict(self) -> dict:
-        """Serializes the DeleteJob into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DeleteJob into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DeleteJob:
-        """Deserializes the DeleteJob from a dictionary."""
-        return cls(job_id=d.get("job_id", None))
+        )


 @dataclass
@@ -1868,31 +1503,6 @@ class DeleteResponse:
         return cls()


-@dataclass
-class DeleteRun:
-    run_id: int
-    """ID of the run to delete."""
-
-    def as_dict(self) -> dict:
-        """Serializes the DeleteRun into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.run_id is not None:
-            body["run_id"] = self.run_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DeleteRun into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.run_id is not None:
-            body["run_id"] = self.run_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DeleteRun:
-        """Deserializes the DeleteRun from a dictionary."""
-        return cls(run_id=d.get("run_id", None))
-
-
 @dataclass
 class DeleteRunResponse:
     def as_dict(self) -> dict:
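`DeleteJob` and `DeleteRun` go the same way; the delete endpoints take the IDs directly:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

w.jobs.delete(job_id=123)      # previously mirrored by DeleteJob(job_id=123)
w.jobs.delete_run(run_id=456)  # previously mirrored by DeleteRun(run_id=456)
```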
@@ -1959,38 +1569,6 @@ class EnforcePolicyComplianceForJobResponseJobClusterSettingsChange:
         )


-@dataclass
-class EnforcePolicyComplianceRequest:
-    job_id: int
-    """The ID of the job you want to enforce policy compliance on."""
-
-    validate_only: Optional[bool] = None
-    """If set, previews changes made to the job to comply with its policy, but does not update the job."""
-
-    def as_dict(self) -> dict:
-        """Serializes the EnforcePolicyComplianceRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        if self.validate_only is not None:
-            body["validate_only"] = self.validate_only
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the EnforcePolicyComplianceRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        if self.validate_only is not None:
-            body["validate_only"] = self.validate_only
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> EnforcePolicyComplianceRequest:
-        """Deserializes the EnforcePolicyComplianceRequest from a dictionary."""
-        return cls(job_id=d.get("job_id", None), validate_only=d.get("validate_only", None))
-
-
 @dataclass
 class EnforcePolicyComplianceResponse:
     has_changes: Optional[bool] = None
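`EnforcePolicyComplianceRequest` disappears as well, while `EnforcePolicyComplianceResponse` stays. A sketch of the call, assuming the endpoint is reached through the `policy_compliance_for_jobs` API group as in recent SDK releases:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# validate_only=True previews the changes the policy would force on the
# job's clusters without applying them.
resp = w.policy_compliance_for_jobs.enforce_compliance(job_id=123, validate_only=True)
print(resp.has_changes)
```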
@@ -2557,8 +2135,6 @@ class GitSource:
     with git_branch or git_tag."""

     git_snapshot: Optional[GitSnapshot] = None
-    """Read-only state of the remote repository at the time the job was run. This field is only
-    included on job runs."""

     git_tag: Optional[str] = None
     """Name of the tag to be checked out and used by this job. This field cannot be specified in
@@ -2731,7 +2307,6 @@ class JobAccessControlRequest:
     """name of the group"""

     permission_level: Optional[JobPermissionLevel] = None
-    """Permission level"""

     service_principal_name: Optional[str] = None
     """application ID of a service principal"""
@@ -3054,9 +2629,6 @@ class JobEnvironment:
     """The key of an environment. It has to be unique within a job."""

     spec: Optional[compute.Environment] = None
-    """The environment entity used to preserve serverless environment side panel, jobs' environment for
-    non-notebook task, and DLT's environment for classic and serverless pipelines. In this minimal
-    environment spec, only pip dependencies are supported."""

     def as_dict(self) -> dict:
         """Serializes the JobEnvironment into a dictionary suitable for use as a JSON request body."""
@@ -3197,7 +2769,6 @@ class JobPermission:
     inherited_from_object: Optional[List[str]] = None

     permission_level: Optional[JobPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the JobPermission into a dictionary suitable for use as a JSON request body."""
@@ -3285,7 +2856,6 @@ class JobPermissionsDescription:
     description: Optional[str] = None

     permission_level: Optional[JobPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the JobPermissionsDescription into a dictionary suitable for use as a JSON request body."""
@@ -3313,40 +2883,6 @@ class JobPermissionsDescription:
         )


-@dataclass
-class JobPermissionsRequest:
-    access_control_list: Optional[List[JobAccessControlRequest]] = None
-
-    job_id: Optional[str] = None
-    """The job for which to get or manage permissions."""
-
-    def as_dict(self) -> dict:
-        """Serializes the JobPermissionsRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = [v.as_dict() for v in self.access_control_list]
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the JobPermissionsRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = self.access_control_list
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> JobPermissionsRequest:
-        """Deserializes the JobPermissionsRequest from a dictionary."""
-        return cls(
-            access_control_list=_repeated_dict(d, "access_control_list", JobAccessControlRequest),
-            job_id=d.get("job_id", None),
-        )
-
-
 @dataclass
 class JobRunAs:
     """Write-only setting. Specifies the user or service principal that the job runs as. If not
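`JobPermissionsRequest` follows the same pattern; `set_permissions` and `update_permissions` take the job ID and access control list as keyword arguments. A sketch with a placeholder group name:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service import jobs

w = WorkspaceClient()

w.jobs.set_permissions(
    job_id="123",  # the permissions endpoints take the job ID as a string
    access_control_list=[
        jobs.JobAccessControlRequest(
            group_name="data-eng",
            permission_level=jobs.JobPermissionLevel.CAN_MANAGE_RUN,
        )
    ],
)
```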
@@ -3435,7 +2971,6 @@ class JobSettings:
     are used, `git_source` must be defined on the job."""

     health: Optional[JobsHealthRules] = None
-    """An optional set of health rules that can be defined for this job."""

     job_clusters: Optional[List[JobCluster]] = None
     """A list of job cluster specifications that can be shared and reused by tasks of this job.
@@ -3474,10 +3009,9 @@ class JobSettings:
     """The queue settings of the job."""

     run_as: Optional[JobRunAs] = None
-    """
-
-
-    Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown."""
+    """The user or service principal that the job runs as, if specified in the request. This field
+    indicates the explicit configuration of `run_as` for the job. To find the value in all cases,
+    explicit or implicit, use `run_as_user_name`."""

     schedule: Optional[CronSchedule] = None
     """An optional periodic schedule for this job. The default behavior is that the job only runs when
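This is one of the few docstring changes in the hunk with real semantic content: `JobSettings.run_as` now documents that it only reflects an explicit setting, and that the effective identity lives elsewhere. A sketch of reading both values, assuming `run_as_user_name` is surfaced on the fetched `Job` as the new docstring suggests:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

job = w.jobs.get(job_id=123)
print(job.settings.run_as)   # None unless run_as was configured explicitly
print(job.run_as_user_name)  # effective identity, explicit or implicit
```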
@@ -3736,18 +3270,8 @@ class JobsHealthOperator(Enum):
 @dataclass
 class JobsHealthRule:
     metric: JobsHealthMetric
-    """Specifies the health metric that is being evaluated for a particular health rule.
-
-    * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * `STREAMING_BACKLOG_BYTES`:
-    An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric
-    is in Public Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
-    across all streams. This metric is in Public Preview. * `STREAMING_BACKLOG_SECONDS`: An estimate
-    of the maximum consumer delay across all streams. This metric is in Public Preview. *
-    `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all
-    streams. This metric is in Public Preview."""

     op: JobsHealthOperator
-    """Specifies the operator used to compare the health metric value with the specified threshold."""

     value: int
     """Specifies the threshold value that the health metric should obey to satisfy the health rule."""
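Only documentation is removed from `JobsHealthRule`; its three required fields are untouched, so constructing health rules is unchanged. A small sketch with a placeholder threshold:

```python
from databricks.sdk.service import jobs

# Flag runs that take longer than 10 minutes.
rule = jobs.JobsHealthRule(
    metric=jobs.JobsHealthMetric.RUN_DURATION_SECONDS,
    op=jobs.JobsHealthOperator.GREATER_THAN,
    value=600,
)
health = jobs.JobsHealthRules(rules=[rule])
```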
@@ -4453,11 +3977,6 @@ class PythonWheelTask:
 @dataclass
 class QueueDetails:
     code: Optional[QueueDetailsCodeCode] = None
-    """The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run was queued due to
-    reaching the workspace limit of active task runs. * `MAX_CONCURRENT_RUNS_REACHED`: The run was
-    queued due to reaching the per-job limit of concurrent job runs. *
-    `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run was queued due to reaching the workspace limit of
-    active run job tasks."""

     message: Optional[str] = None
     """A descriptive message with the queuing details. This field is unstructured, and its exact format
@@ -4548,7 +4067,6 @@ class RepairHistoryItem:
     """Deprecated. Please use the `status` field instead."""

     status: Optional[RunStatus] = None
-    """The current status of the run"""

     task_run_ids: Optional[List[int]] = None
     """The run IDs of the task runs that ran as part of this repair history item."""
@@ -4591,233 +4109,35 @@ class RepairHistoryItem:
         if self.state:
             body["state"] = self.state
         if self.status:
-            body["status"] = self.status
-        if self.task_run_ids:
-            body["task_run_ids"] = self.task_run_ids
-        if self.type is not None:
-            body["type"] = self.type
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> RepairHistoryItem:
-        """Deserializes the RepairHistoryItem from a dictionary."""
-        return cls(
-            effective_performance_target=_enum(d, "effective_performance_target", PerformanceTarget),
-            end_time=d.get("end_time", None),
-            id=d.get("id", None),
-            start_time=d.get("start_time", None),
-            state=_from_dict(d, "state", RunState),
-            status=_from_dict(d, "status", RunStatus),
-            task_run_ids=d.get("task_run_ids", None),
-            type=_enum(d, "type", RepairHistoryItemType),
-        )
-
-
-class RepairHistoryItemType(Enum):
-    """The repair history item type. Indicates whether a run is the original run or a repair run."""
-
-    ORIGINAL = "ORIGINAL"
-    REPAIR = "REPAIR"
-
-
-@dataclass
-class RepairRun:
-    run_id: int
-    """The job run ID of the run to repair. The run must not be in progress."""
-
-    dbt_commands: Optional[List[str]] = None
-    """An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt
-    deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]`"""
-
-    jar_params: Optional[List[str]] = None
-    """A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe",
-    "35"]`. The parameters are used to invoke the main function of the main class specified in the
-    Spark JAR task. If not specified upon `run-now`, it defaults to an empty list. jar_params cannot
-    be specified in conjunction with notebook_params. The JSON representation of this field (for
-    example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes.
-
-    Use [Task parameter variables] to set parameters containing information about job runs.
-
-    [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables"""
-
-    job_parameters: Optional[Dict[str, str]] = None
-    """Job-level parameters used in the run. for example `"param": "overriding_val"`"""
-
-    latest_repair_id: Optional[int] = None
-    """The ID of the latest repair. This parameter is not required when repairing a run for the first
-    time, but must be provided on subsequent requests to repair the same run."""
-
-    notebook_params: Optional[Dict[str, str]] = None
-    """A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name":
-    "john doe", "age": "35"}`. The map is passed to the notebook and is accessible through the
-    [dbutils.widgets.get] function.
-
-    If not specified upon `run-now`, the triggered run uses the job’s base parameters.
-
-    notebook_params cannot be specified in conjunction with jar_params.
-
-    Use [Task parameter variables] to set parameters containing information about job runs.
-
-    The JSON representation of this field (for example `{"notebook_params":{"name":"john
-    doe","age":"35"}}`) cannot exceed 10,000 bytes.
-
-    [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
-    [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html"""
-
-    performance_target: Optional[PerformanceTarget] = None
-    """The performance mode on a serverless job. The performance target determines the level of compute
-    performance or cost-efficiency for the run. This field overrides the performance target defined
-    on the job level.
-
-    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
-    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
-    optimized cluster performance."""
-
-    pipeline_params: Optional[PipelineParams] = None
-    """Controls whether the pipeline should perform a full refresh"""
-
-    python_named_params: Optional[Dict[str, str]] = None
-
-    python_params: Optional[List[str]] = None
-    """A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe",
-    "35"]`. The parameters are passed to Python file as command-line parameters. If specified upon
-    `run-now`, it would overwrite the parameters specified in job setting. The JSON representation
-    of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes.
-
-    Use [Task parameter variables] to set parameters containing information about job runs.
-
-    Important
-
-    These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters
-    returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and
-    emojis.
-
-    [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables"""
-
-    rerun_all_failed_tasks: Optional[bool] = None
-    """If true, repair all failed tasks. Only one of `rerun_tasks` or `rerun_all_failed_tasks` can be
-    used."""
-
-    rerun_dependent_tasks: Optional[bool] = None
-    """If true, repair all tasks that depend on the tasks in `rerun_tasks`, even if they were
-    previously successful. Can be also used in combination with `rerun_all_failed_tasks`."""
-
-    rerun_tasks: Optional[List[str]] = None
-    """The task keys of the task runs to repair."""
-
-    spark_submit_params: Optional[List[str]] = None
-    """A list of parameters for jobs with spark submit task, for example `"spark_submit_params":
-    ["--class", "org.apache.spark.examples.SparkPi"]`. The parameters are passed to spark-submit
-    script as command-line parameters. If specified upon `run-now`, it would overwrite the
-    parameters specified in job setting. The JSON representation of this field (for example
-    `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes.
-
-    Use [Task parameter variables] to set parameters containing information about job runs
-
-    Important
-
-    These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters
-    returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and
-    emojis.
-
-    [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables"""
-
-    sql_params: Optional[Dict[str, str]] = None
-    """A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john
-    doe", "age": "35"}`. The SQL alert task does not support custom parameters."""
-
-    def as_dict(self) -> dict:
-        """Serializes the RepairRun into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.dbt_commands:
-            body["dbt_commands"] = [v for v in self.dbt_commands]
-        if self.jar_params:
-            body["jar_params"] = [v for v in self.jar_params]
-        if self.job_parameters:
-            body["job_parameters"] = self.job_parameters
-        if self.latest_repair_id is not None:
-            body["latest_repair_id"] = self.latest_repair_id
-        if self.notebook_params:
-            body["notebook_params"] = self.notebook_params
-        if self.performance_target is not None:
-            body["performance_target"] = self.performance_target.value
-        if self.pipeline_params:
-            body["pipeline_params"] = self.pipeline_params.as_dict()
-        if self.python_named_params:
-            body["python_named_params"] = self.python_named_params
-        if self.python_params:
-            body["python_params"] = [v for v in self.python_params]
-        if self.rerun_all_failed_tasks is not None:
-            body["rerun_all_failed_tasks"] = self.rerun_all_failed_tasks
-        if self.rerun_dependent_tasks is not None:
-            body["rerun_dependent_tasks"] = self.rerun_dependent_tasks
-        if self.rerun_tasks:
-            body["rerun_tasks"] = [v for v in self.rerun_tasks]
-        if self.run_id is not None:
-            body["run_id"] = self.run_id
-        if self.spark_submit_params:
-            body["spark_submit_params"] = [v for v in self.spark_submit_params]
-        if self.sql_params:
-            body["sql_params"] = self.sql_params
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the RepairRun into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.dbt_commands:
-            body["dbt_commands"] = self.dbt_commands
-        if self.jar_params:
-            body["jar_params"] = self.jar_params
-        if self.job_parameters:
-            body["job_parameters"] = self.job_parameters
-        if self.latest_repair_id is not None:
-            body["latest_repair_id"] = self.latest_repair_id
-        if self.notebook_params:
-            body["notebook_params"] = self.notebook_params
-        if self.performance_target is not None:
-            body["performance_target"] = self.performance_target
-        if self.pipeline_params:
-            body["pipeline_params"] = self.pipeline_params
-        if self.python_named_params:
-            body["python_named_params"] = self.python_named_params
-        if self.python_params:
-            body["python_params"] = self.python_params
-        if self.rerun_all_failed_tasks is not None:
-            body["rerun_all_failed_tasks"] = self.rerun_all_failed_tasks
-        if self.rerun_dependent_tasks is not None:
-            body["rerun_dependent_tasks"] = self.rerun_dependent_tasks
-        if self.rerun_tasks:
-            body["rerun_tasks"] = self.rerun_tasks
-        if self.run_id is not None:
-            body["run_id"] = self.run_id
-        if self.spark_submit_params:
-            body["spark_submit_params"] = self.spark_submit_params
-        if self.sql_params:
-            body["sql_params"] = self.sql_params
+            body["status"] = self.status
+        if self.task_run_ids:
+            body["task_run_ids"] = self.task_run_ids
+        if self.type is not None:
+            body["type"] = self.type
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> RepairRun:
-        """Deserializes the RepairRun from a dictionary."""
+    def from_dict(cls, d: Dict[str, Any]) -> RepairHistoryItem:
+        """Deserializes the RepairHistoryItem from a dictionary."""
         return cls(
-            dbt_commands=d.get("dbt_commands", None),
-            jar_params=d.get("jar_params", None),
-            job_parameters=d.get("job_parameters", None),
-            latest_repair_id=d.get("latest_repair_id", None),
-            notebook_params=d.get("notebook_params", None),
-            performance_target=_enum(d, "performance_target", PerformanceTarget),
-            pipeline_params=_from_dict(d, "pipeline_params", PipelineParams),
-            python_named_params=d.get("python_named_params", None),
-            python_params=d.get("python_params", None),
-            rerun_all_failed_tasks=d.get("rerun_all_failed_tasks", None),
-            rerun_dependent_tasks=d.get("rerun_dependent_tasks", None),
-            rerun_tasks=d.get("rerun_tasks", None),
-            run_id=d.get("run_id", None),
-            spark_submit_params=d.get("spark_submit_params", None),
-            sql_params=d.get("sql_params", None),
+            effective_performance_target=_enum(d, "effective_performance_target", PerformanceTarget),
+            end_time=d.get("end_time", None),
+            id=d.get("id", None),
+            start_time=d.get("start_time", None),
+            state=_from_dict(d, "state", RunState),
+            status=_from_dict(d, "status", RunStatus),
+            task_run_ids=d.get("task_run_ids", None),
+            type=_enum(d, "type", RepairHistoryItemType),
         )


+class RepairHistoryItemType(Enum):
+    """The repair history item type. Indicates whether a run is the original run or a repair run."""
+
+    ORIGINAL = "ORIGINAL"
+    REPAIR = "REPAIR"
+
+
 @dataclass
 class RepairRunResponse:
     """Run repair was initiated."""
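`RepairRun` was the largest request type removed in this hunk; the repair endpoint keeps the same parameters as keyword arguments on `w.jobs.repair_run`. A sketch with placeholder IDs and task keys:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Re-run two failed tasks of run 456 plus everything that depends on them,
# and wait for the repaired run to finish.
run = w.jobs.repair_run(
    run_id=456,
    rerun_tasks=["ingest", "transform"],
    rerun_dependent_tasks=True,
).result()
```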
@@ -4846,41 +4166,6 @@ class RepairRunResponse:
         return cls(repair_id=d.get("repair_id", None))


-@dataclass
-class ResetJob:
-    job_id: int
-    """The canonical identifier of the job to reset. This field is required."""
-
-    new_settings: JobSettings
-    """The new settings of the job. These settings completely replace the old settings.
-
-    Changes to the field `JobBaseSettings.timeout_seconds` are applied to active runs. Changes to
-    other fields are applied to future runs only."""
-
-    def as_dict(self) -> dict:
-        """Serializes the ResetJob into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        if self.new_settings:
-            body["new_settings"] = self.new_settings.as_dict()
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the ResetJob into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        if self.new_settings:
-            body["new_settings"] = self.new_settings
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> ResetJob:
-        """Deserializes the ResetJob from a dictionary."""
-        return cls(job_id=d.get("job_id", None), new_settings=_from_dict(d, "new_settings", JobSettings))
-
-
 @dataclass
 class ResetResponse:
     def as_dict(self) -> dict:
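`ResetJob` is removed on the same grounds. Because `reset` replaces a job's settings wholesale, the usual pattern is fetch, modify, reset; a sketch:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

job = w.jobs.get(job_id=123)
settings = job.settings
# Per the removed docstring, timeout_seconds is the one field whose change
# also applies to active runs.
settings.timeout_seconds = 3600

w.jobs.reset(job_id=123, new_settings=settings)
```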
@@ -5292,11 +4577,6 @@ class Run:
     """The URL to the detail page of the run."""

     run_type: Optional[RunType] = None
-    """The type of a run. * `JOB_RUN`: Normal job run. A run created with :method:jobs/runNow. *
-    `WORKFLOW_RUN`: Workflow run. A run created with [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit
-    run. A run created with :method:jobs/submit.
-
-    [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow"""

     schedule: Optional[CronSchedule] = None
     """The cron schedule that triggered this run if it was triggered by the periodic scheduler."""
@@ -5317,7 +4597,6 @@ class Run:
     """Deprecated. Please use the `status` field instead."""

     status: Optional[RunStatus] = None
-    """The current status of the run"""

     tasks: Optional[List[RunTask]] = None
     """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
@@ -5326,19 +4605,8 @@ class Run:
     root to determine if more results are available."""

     trigger: Optional[TriggerType] = None
-    """The type of trigger that fired this run.
-
-    * `PERIODIC`: Schedules that periodically trigger runs, such as a cron scheduler. * `ONE_TIME`:
-    One time triggers that fire a single run. This occurs you triggered a single run on demand
-    through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a
-    previously failed run. This occurs when you request to re-run the job in case of failures. *
-    `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
-    Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
-    triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
-    restart a continuous job run."""

     trigger_info: Optional[TriggerInfo] = None
-    """Additional details about what triggered the run"""

     def as_dict(self) -> dict:
         """Serializes the Run into a dictionary suitable for use as a JSON request body."""
@@ -5697,206 +4965,7 @@ class RunJobTask:

     dbt_commands: Optional[List[str]] = None
     """An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt
-    deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]`"""
-
-    jar_params: Optional[List[str]] = None
-    """A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe",
-    "35"]`. The parameters are used to invoke the main function of the main class specified in the
-    Spark JAR task. If not specified upon `run-now`, it defaults to an empty list. jar_params cannot
-    be specified in conjunction with notebook_params. The JSON representation of this field (for
-    example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes.
-
-    Use [Task parameter variables] to set parameters containing information about job runs.
-
-    [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables"""
-
-    job_parameters: Optional[Dict[str, str]] = None
-    """Job-level parameters used to trigger the job."""
-
-    notebook_params: Optional[Dict[str, str]] = None
-    """A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name":
-    "john doe", "age": "35"}`. The map is passed to the notebook and is accessible through the
-    [dbutils.widgets.get] function.
-
-    If not specified upon `run-now`, the triggered run uses the job’s base parameters.
-
-    notebook_params cannot be specified in conjunction with jar_params.
-
-    Use [Task parameter variables] to set parameters containing information about job runs.
-
-    The JSON representation of this field (for example `{"notebook_params":{"name":"john
-    doe","age":"35"}}`) cannot exceed 10,000 bytes.
-
-    [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
-    [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html"""
-
-    pipeline_params: Optional[PipelineParams] = None
-    """Controls whether the pipeline should perform a full refresh"""
-
-    python_named_params: Optional[Dict[str, str]] = None
-
-    python_params: Optional[List[str]] = None
-    """A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe",
-    "35"]`. The parameters are passed to Python file as command-line parameters. If specified upon
-    `run-now`, it would overwrite the parameters specified in job setting. The JSON representation
-    of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes.
-
-    Use [Task parameter variables] to set parameters containing information about job runs.
-
-    Important
-
-    These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters
-    returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and
-    emojis.
-
-    [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables"""
-
-    spark_submit_params: Optional[List[str]] = None
-    """A list of parameters for jobs with spark submit task, for example `"spark_submit_params":
-    ["--class", "org.apache.spark.examples.SparkPi"]`. The parameters are passed to spark-submit
-    script as command-line parameters. If specified upon `run-now`, it would overwrite the
-    parameters specified in job setting. The JSON representation of this field (for example
-    `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes.
-
-    Use [Task parameter variables] to set parameters containing information about job runs
-
-    Important
-
-    These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters
-    returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and
-    emojis.
-
-    [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables"""
-
-    sql_params: Optional[Dict[str, str]] = None
-    """A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john
-    doe", "age": "35"}`. The SQL alert task does not support custom parameters."""
-
-    def as_dict(self) -> dict:
-        """Serializes the RunJobTask into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.dbt_commands:
-            body["dbt_commands"] = [v for v in self.dbt_commands]
-        if self.jar_params:
-            body["jar_params"] = [v for v in self.jar_params]
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        if self.job_parameters:
-            body["job_parameters"] = self.job_parameters
-        if self.notebook_params:
-            body["notebook_params"] = self.notebook_params
-        if self.pipeline_params:
-            body["pipeline_params"] = self.pipeline_params.as_dict()
-        if self.python_named_params:
-            body["python_named_params"] = self.python_named_params
-        if self.python_params:
-            body["python_params"] = [v for v in self.python_params]
-        if self.spark_submit_params:
-            body["spark_submit_params"] = [v for v in self.spark_submit_params]
-        if self.sql_params:
-            body["sql_params"] = self.sql_params
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the RunJobTask into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.dbt_commands:
-            body["dbt_commands"] = self.dbt_commands
-        if self.jar_params:
-            body["jar_params"] = self.jar_params
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        if self.job_parameters:
-            body["job_parameters"] = self.job_parameters
-        if self.notebook_params:
-            body["notebook_params"] = self.notebook_params
-        if self.pipeline_params:
-            body["pipeline_params"] = self.pipeline_params
-        if self.python_named_params:
-            body["python_named_params"] = self.python_named_params
-        if self.python_params:
-            body["python_params"] = self.python_params
-        if self.spark_submit_params:
-            body["spark_submit_params"] = self.spark_submit_params
-        if self.sql_params:
-            body["sql_params"] = self.sql_params
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> RunJobTask:
-        """Deserializes the RunJobTask from a dictionary."""
-        return cls(
-            dbt_commands=d.get("dbt_commands", None),
-            jar_params=d.get("jar_params", None),
-            job_id=d.get("job_id", None),
-            job_parameters=d.get("job_parameters", None),
-            notebook_params=d.get("notebook_params", None),
-            pipeline_params=_from_dict(d, "pipeline_params", PipelineParams),
-            python_named_params=d.get("python_named_params", None),
-            python_params=d.get("python_params", None),
-            spark_submit_params=d.get("spark_submit_params", None),
-            sql_params=d.get("sql_params", None),
|
5839
|
-
)
|
|
5840
|
-
|
|
5841
|
-
|
|
5842
|
-
class RunLifeCycleState(Enum):
|
|
5843
|
-
"""A value indicating the run's lifecycle state. The possible values are: * `QUEUED`: The run is
|
|
5844
|
-
queued. * `PENDING`: The run is waiting to be executed while the cluster and execution context
|
|
5845
|
-
are being prepared. * `RUNNING`: The task of this run is being executed. * `TERMINATING`: The
|
|
5846
|
-
task of this run has completed, and the cluster and execution context are being cleaned up. *
|
|
5847
|
-
`TERMINATED`: The task of this run has completed, and the cluster and execution context have
|
|
5848
|
-
been cleaned up. This state is terminal. * `SKIPPED`: This run was aborted because a previous
|
|
5849
|
-
run of the same job was already active. This state is terminal. * `INTERNAL_ERROR`: An
|
|
5850
|
-
exceptional state that indicates a failure in the Jobs service, such as network failure over a
|
|
5851
|
-
long period. If a run on a new cluster ends in the `INTERNAL_ERROR` state, the Jobs service
|
|
5852
|
-
terminates the cluster as soon as possible. This state is terminal. * `BLOCKED`: The run is
|
|
5853
|
-
blocked on an upstream dependency. * `WAITING_FOR_RETRY`: The run is waiting for a retry."""
|
|
5854
|
-
|
|
5855
|
-
BLOCKED = "BLOCKED"
|
|
5856
|
-
INTERNAL_ERROR = "INTERNAL_ERROR"
|
|
5857
|
-
PENDING = "PENDING"
|
|
5858
|
-
QUEUED = "QUEUED"
|
|
5859
|
-
RUNNING = "RUNNING"
|
|
5860
|
-
SKIPPED = "SKIPPED"
|
|
5861
|
-
TERMINATED = "TERMINATED"
|
|
5862
|
-
TERMINATING = "TERMINATING"
|
|
5863
|
-
WAITING_FOR_RETRY = "WAITING_FOR_RETRY"
|
|
5864
|
-
|
|
5865
|
-
|
|
5866
|
-
class RunLifecycleStateV2State(Enum):
|
|
5867
|
-
"""The current state of the run."""
|
|
5868
|
-
|
|
5869
|
-
BLOCKED = "BLOCKED"
|
|
5870
|
-
PENDING = "PENDING"
|
|
5871
|
-
QUEUED = "QUEUED"
|
|
5872
|
-
RUNNING = "RUNNING"
|
|
5873
|
-
TERMINATED = "TERMINATED"
|
|
5874
|
-
TERMINATING = "TERMINATING"
|
|
5875
|
-
WAITING = "WAITING"
|
|
5876
|
-
|
|
5877
|
-
|
|
5878
|
-
@dataclass
|
|
5879
|
-
class RunNow:
|
|
5880
|
-
job_id: int
|
|
5881
|
-
"""The ID of the job to be executed"""
|
|
5882
|
-
|
|
5883
|
-
dbt_commands: Optional[List[str]] = None
|
|
5884
|
-
"""An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt
|
|
5885
|
-
deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]`"""
|
|
5886
|
-
|
|
5887
|
-
idempotency_token: Optional[str] = None
|
|
5888
|
-
"""An optional token to guarantee the idempotency of job run requests. If a run with the provided
|
|
5889
|
-
token already exists, the request does not create a new run but returns the ID of the existing
|
|
5890
|
-
run instead. If a run with the provided token is deleted, an error is returned.
|
|
5891
|
-
|
|
5892
|
-
If you specify the idempotency token, upon failure you can retry until the request succeeds.
|
|
5893
|
-
Databricks guarantees that exactly one run is launched with that idempotency token.
|
|
5894
|
-
|
|
5895
|
-
This token must have at most 64 characters.
|
|
5896
|
-
|
|
5897
|
-
For more information, see [How to ensure idempotency for jobs].
|
|
5898
|
-
|
|
5899
|
-
[How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html"""
|
|
4968
|
+
deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]`"""
|
|
5900
4969
|
|
|
5901
4970
|
jar_params: Optional[List[str]] = None
|
|
5902
4971
|
"""A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe",
|
|
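Although the `RunNow` request dataclass loses these fields, the behavior the removed docstrings describe (parameter overrides, the 10,000-byte JSON limit, and idempotent retries) still applies to the `jobs.run_now` method, which keeps them as keyword arguments. A minimal sketch under that assumption; the job ID and token values are hypothetical:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    # notebook_params reach the notebook via dbutils.widgets.get("name");
    # the JSON form of the map may not exceed 10,000 bytes.
    waiter = w.jobs.run_now(
        job_id=123,  # hypothetical job ID
        notebook_params={"name": "john doe", "age": "35"},
        idempotency_token="nightly-2024-07-01",  # retries reuse the same run
    )
    run = waiter.result()  # blocks until the run reaches a terminal state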
@@ -5910,7 +4979,7 @@ class RunNow:
     [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables"""

     job_parameters: Optional[Dict[str, str]] = None
-    """Job-level parameters used
+    """Job-level parameters used to trigger the job."""

     notebook_params: Optional[Dict[str, str]] = None
     """A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name":
@@ -5929,19 +4998,6 @@ class RunNow:
     [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
     [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html"""

-    only: Optional[List[str]] = None
-    """A list of task keys to run inside of the job. If this field is not provided, all tasks in the
-    job will be run."""
-
-    performance_target: Optional[PerformanceTarget] = None
-    """The performance mode on a serverless job. The performance target determines the level of compute
-    performance or cost-efficiency for the run. This field overrides the performance target defined
-    on the job level.
-
-    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
-    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
-    optimized cluster performance."""
-
     pipeline_params: Optional[PipelineParams] = None
     """Controls whether the pipeline should perform a full refresh"""

@@ -5963,9 +5019,6 @@ class RunNow:

     [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables"""

-    queue: Optional[QueueSettings] = None
-    """The queue settings of the run."""
-
     spark_submit_params: Optional[List[str]] = None
     """A list of parameters for jobs with spark submit task, for example `"spark_submit_params":
     ["--class", "org.apache.spark.examples.SparkPi"]`. The parameters are passed to spark-submit
@@ -5988,12 +5041,10 @@ class RunNow:
     doe", "age": "35"}`. The SQL alert task does not support custom parameters."""

     def as_dict(self) -> dict:
-        """Serializes the RunNow into a dictionary suitable for use as a JSON request body."""
+        """Serializes the RunJobTask into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.dbt_commands:
             body["dbt_commands"] = [v for v in self.dbt_commands]
-        if self.idempotency_token is not None:
-            body["idempotency_token"] = self.idempotency_token
         if self.jar_params:
             body["jar_params"] = [v for v in self.jar_params]
         if self.job_id is not None:
@@ -6002,18 +5053,12 @@ class RunNow:
             body["job_parameters"] = self.job_parameters
         if self.notebook_params:
             body["notebook_params"] = self.notebook_params
-        if self.only:
-            body["only"] = [v for v in self.only]
-        if self.performance_target is not None:
-            body["performance_target"] = self.performance_target.value
         if self.pipeline_params:
             body["pipeline_params"] = self.pipeline_params.as_dict()
         if self.python_named_params:
             body["python_named_params"] = self.python_named_params
         if self.python_params:
             body["python_params"] = [v for v in self.python_params]
-        if self.queue:
-            body["queue"] = self.queue.as_dict()
         if self.spark_submit_params:
             body["spark_submit_params"] = [v for v in self.spark_submit_params]
         if self.sql_params:
@@ -6021,12 +5066,10 @@ class RunNow:
         return body

     def as_shallow_dict(self) -> dict:
-        """Serializes the RunNow into a shallow dictionary of its immediate attributes."""
+        """Serializes the RunJobTask into a shallow dictionary of its immediate attributes."""
         body = {}
         if self.dbt_commands:
             body["dbt_commands"] = self.dbt_commands
-        if self.idempotency_token is not None:
-            body["idempotency_token"] = self.idempotency_token
         if self.jar_params:
             body["jar_params"] = self.jar_params
         if self.job_id is not None:
@@ -6035,18 +5078,12 @@ class RunNow:
             body["job_parameters"] = self.job_parameters
         if self.notebook_params:
             body["notebook_params"] = self.notebook_params
-        if self.only:
-            body["only"] = self.only
-        if self.performance_target is not None:
-            body["performance_target"] = self.performance_target
         if self.pipeline_params:
             body["pipeline_params"] = self.pipeline_params
         if self.python_named_params:
             body["python_named_params"] = self.python_named_params
         if self.python_params:
             body["python_params"] = self.python_params
-        if self.queue:
-            body["queue"] = self.queue
         if self.spark_submit_params:
             body["spark_submit_params"] = self.spark_submit_params
         if self.sql_params:
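The generated classes in these hunks all share one serialization contract: `as_dict` serializes nested message types recursively (note the `.as_dict()` calls), `as_shallow_dict` leaves nested objects unserialized, and `from_dict` rebuilds the dataclass through helpers such as `_from_dict`. A round-trip sketch using the `RunJobTask` fields shown above; the IDs are made up:

    from databricks.sdk.service import jobs

    task = jobs.RunJobTask(job_id=123, job_parameters={"env": "dev"})  # hypothetical job ID
    body = task.as_dict()  # {"job_id": 123, "job_parameters": {"env": "dev"}}
    restored = jobs.RunJobTask.from_dict(body)
    assert restored.job_id == task.job_id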
@@ -6054,26 +5091,58 @@ class RunNow:
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> RunNow:
-        """Deserializes the RunNow from a dictionary."""
+    def from_dict(cls, d: Dict[str, Any]) -> RunJobTask:
+        """Deserializes the RunJobTask from a dictionary."""
         return cls(
             dbt_commands=d.get("dbt_commands", None),
-            idempotency_token=d.get("idempotency_token", None),
             jar_params=d.get("jar_params", None),
             job_id=d.get("job_id", None),
             job_parameters=d.get("job_parameters", None),
             notebook_params=d.get("notebook_params", None),
-            only=d.get("only", None),
-            performance_target=_enum(d, "performance_target", PerformanceTarget),
             pipeline_params=_from_dict(d, "pipeline_params", PipelineParams),
             python_named_params=d.get("python_named_params", None),
             python_params=d.get("python_params", None),
-            queue=_from_dict(d, "queue", QueueSettings),
             spark_submit_params=d.get("spark_submit_params", None),
             sql_params=d.get("sql_params", None),
         )


+class RunLifeCycleState(Enum):
+    """A value indicating the run's lifecycle state. The possible values are: * `QUEUED`: The run is
+    queued. * `PENDING`: The run is waiting to be executed while the cluster and execution context
+    are being prepared. * `RUNNING`: The task of this run is being executed. * `TERMINATING`: The
+    task of this run has completed, and the cluster and execution context are being cleaned up. *
+    `TERMINATED`: The task of this run has completed, and the cluster and execution context have
+    been cleaned up. This state is terminal. * `SKIPPED`: This run was aborted because a previous
+    run of the same job was already active. This state is terminal. * `INTERNAL_ERROR`: An
+    exceptional state that indicates a failure in the Jobs service, such as network failure over a
+    long period. If a run on a new cluster ends in the `INTERNAL_ERROR` state, the Jobs service
+    terminates the cluster as soon as possible. This state is terminal. * `BLOCKED`: The run is
+    blocked on an upstream dependency. * `WAITING_FOR_RETRY`: The run is waiting for a retry."""
+
+    BLOCKED = "BLOCKED"
+    INTERNAL_ERROR = "INTERNAL_ERROR"
+    PENDING = "PENDING"
+    QUEUED = "QUEUED"
+    RUNNING = "RUNNING"
+    SKIPPED = "SKIPPED"
+    TERMINATED = "TERMINATED"
+    TERMINATING = "TERMINATING"
+    WAITING_FOR_RETRY = "WAITING_FOR_RETRY"
+
+
+class RunLifecycleStateV2State(Enum):
+    """The current state of the run."""
+
+    BLOCKED = "BLOCKED"
+    PENDING = "PENDING"
+    QUEUED = "QUEUED"
+    RUNNING = "RUNNING"
+    TERMINATED = "TERMINATED"
+    TERMINATING = "TERMINATING"
+    WAITING = "WAITING"
+
+
 @dataclass
 class RunNowResponse:
     """Run was started successfully."""
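The two enums were not dropped; they moved later in the file, keeping the legacy `RunLifeCycleState` alongside the newer `RunLifecycleStateV2State` that `RunStatus.state` uses. A hedged polling sketch built on those types; the run ID is hypothetical:

    import time

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()
    run_id = 456  # hypothetical run ID returned by run_now or submit
    while True:
        status = w.jobs.get_run(run_id=run_id).status
        # TERMINATED is the terminal value in RunLifecycleStateV2State
        if status and status.state == jobs.RunLifecycleStateV2State.TERMINATED:
            break
        time.sleep(10)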
@@ -6482,7 +5551,6 @@ class RunStatus:
     """If the run was queued, details about the reason for queuing the run."""

     state: Optional[RunLifecycleStateV2State] = None
-    """The current state of the run."""

     termination_details: Optional[TerminationDetails] = None
     """If the run is in a TERMINATING or TERMINATED state, details about the reason for terminating the
@@ -6719,7 +5787,6 @@ class RunTask:
     """Deprecated. Please use the `status` field instead."""

     status: Optional[RunStatus] = None
-    """The current status of the run"""

     timeout_seconds: Optional[int] = None
     """An optional timeout applied to each run of this job task. A value of `0` means no timeout."""
@@ -7154,10 +6221,6 @@ class SparkSubmitTask:
 @dataclass
 class SqlAlertOutput:
     alert_state: Optional[SqlAlertState] = None
-    """The state of the SQL alert.
-
-    * UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not fulfill trigger
-    conditions * TRIGGERED: alert evaluated and fulfilled trigger conditions"""

     output_link: Optional[str] = None
     """The link to find the output results."""
@@ -7764,157 +6827,6 @@ class StorageMode(Enum):
     IMPORT = "IMPORT"


-@dataclass
-class SubmitRun:
-    access_control_list: Optional[List[JobAccessControlRequest]] = None
-    """List of permissions to set on the job."""
-
-    budget_policy_id: Optional[str] = None
-    """The user specified id of the budget policy to use for this one-time run. If not specified, the
-    run will be not be attributed to any budget policy."""
-
-    email_notifications: Optional[JobEmailNotifications] = None
-    """An optional set of email addresses notified when the run begins or completes."""
-
-    environments: Optional[List[JobEnvironment]] = None
-    """A list of task execution environment specifications that can be referenced by tasks of this run."""
-
-    git_source: Optional[GitSource] = None
-    """An optional specification for a remote Git repository containing the source code used by tasks.
-    Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.
-
-    If `git_source` is set, these tasks retrieve the file from the remote repository by default.
-    However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.
-
-    Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks
-    are used, `git_source` must be defined on the job."""
-
-    health: Optional[JobsHealthRules] = None
-    """An optional set of health rules that can be defined for this job."""
-
-    idempotency_token: Optional[str] = None
-    """An optional token that can be used to guarantee the idempotency of job run requests. If a run
-    with the provided token already exists, the request does not create a new run but returns the ID
-    of the existing run instead. If a run with the provided token is deleted, an error is returned.
-
-    If you specify the idempotency token, upon failure you can retry until the request succeeds.
-    Databricks guarantees that exactly one run is launched with that idempotency token.
-
-    This token must have at most 64 characters.
-
-    For more information, see [How to ensure idempotency for jobs].
-
-    [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html"""
-
-    notification_settings: Optional[JobNotificationSettings] = None
-    """Optional notification settings that are used when sending notifications to each of the
-    `email_notifications` and `webhook_notifications` for this run."""
-
-    queue: Optional[QueueSettings] = None
-    """The queue settings of the one-time run."""
-
-    run_as: Optional[JobRunAs] = None
-    """Specifies the user or service principal that the job runs as. If not specified, the job runs as
-    the user who submits the request."""
-
-    run_name: Optional[str] = None
-    """An optional name for the run. The default value is `Untitled`."""
-
-    tasks: Optional[List[SubmitTask]] = None
-
-    timeout_seconds: Optional[int] = None
-    """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
-
-    webhook_notifications: Optional[WebhookNotifications] = None
-    """A collection of system notification IDs to notify when the run begins or completes."""
-
-    def as_dict(self) -> dict:
-        """Serializes the SubmitRun into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = [v.as_dict() for v in self.access_control_list]
-        if self.budget_policy_id is not None:
-            body["budget_policy_id"] = self.budget_policy_id
-        if self.email_notifications:
-            body["email_notifications"] = self.email_notifications.as_dict()
-        if self.environments:
-            body["environments"] = [v.as_dict() for v in self.environments]
-        if self.git_source:
-            body["git_source"] = self.git_source.as_dict()
-        if self.health:
-            body["health"] = self.health.as_dict()
-        if self.idempotency_token is not None:
-            body["idempotency_token"] = self.idempotency_token
-        if self.notification_settings:
-            body["notification_settings"] = self.notification_settings.as_dict()
-        if self.queue:
-            body["queue"] = self.queue.as_dict()
-        if self.run_as:
-            body["run_as"] = self.run_as.as_dict()
-        if self.run_name is not None:
-            body["run_name"] = self.run_name
-        if self.tasks:
-            body["tasks"] = [v.as_dict() for v in self.tasks]
-        if self.timeout_seconds is not None:
-            body["timeout_seconds"] = self.timeout_seconds
-        if self.webhook_notifications:
-            body["webhook_notifications"] = self.webhook_notifications.as_dict()
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the SubmitRun into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = self.access_control_list
-        if self.budget_policy_id is not None:
-            body["budget_policy_id"] = self.budget_policy_id
-        if self.email_notifications:
-            body["email_notifications"] = self.email_notifications
-        if self.environments:
-            body["environments"] = self.environments
-        if self.git_source:
-            body["git_source"] = self.git_source
-        if self.health:
-            body["health"] = self.health
-        if self.idempotency_token is not None:
-            body["idempotency_token"] = self.idempotency_token
-        if self.notification_settings:
-            body["notification_settings"] = self.notification_settings
-        if self.queue:
-            body["queue"] = self.queue
-        if self.run_as:
-            body["run_as"] = self.run_as
-        if self.run_name is not None:
-            body["run_name"] = self.run_name
-        if self.tasks:
-            body["tasks"] = self.tasks
-        if self.timeout_seconds is not None:
-            body["timeout_seconds"] = self.timeout_seconds
-        if self.webhook_notifications:
-            body["webhook_notifications"] = self.webhook_notifications
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> SubmitRun:
-        """Deserializes the SubmitRun from a dictionary."""
-        return cls(
-            access_control_list=_repeated_dict(d, "access_control_list", JobAccessControlRequest),
-            budget_policy_id=d.get("budget_policy_id", None),
-            email_notifications=_from_dict(d, "email_notifications", JobEmailNotifications),
-            environments=_repeated_dict(d, "environments", JobEnvironment),
-            git_source=_from_dict(d, "git_source", GitSource),
-            health=_from_dict(d, "health", JobsHealthRules),
-            idempotency_token=d.get("idempotency_token", None),
-            notification_settings=_from_dict(d, "notification_settings", JobNotificationSettings),
-            queue=_from_dict(d, "queue", QueueSettings),
-            run_as=_from_dict(d, "run_as", JobRunAs),
-            run_name=d.get("run_name", None),
-            tasks=_repeated_dict(d, "tasks", SubmitTask),
-            timeout_seconds=d.get("timeout_seconds", None),
-            webhook_notifications=_from_dict(d, "webhook_notifications", WebhookNotifications),
-        )
-
-
 @dataclass
 class SubmitRunResponse:
     """Run was created and started successfully."""
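The `SubmitRun` request dataclass is removed in 0.59.0, but the `jobs.submit` method keeps the same wire fields as keyword arguments, so one-time runs are submitted directly. A minimal sketch under that assumption; the notebook path and cluster ID are hypothetical:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()
    run = w.jobs.submit(
        run_name="adhoc-smoke-test",  # defaults to `Untitled` when omitted
        tasks=[
            jobs.SubmitTask(
                task_key="main",
                existing_cluster_id="0123-456789-abcdefgh",  # hypothetical cluster ID
                notebook_task=jobs.NotebookTask(notebook_path="/Shared/smoke_test"),
            )
        ],
    ).result()  # waits until the one-time run terminates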
@@ -7999,7 +6911,6 @@ class SubmitTask:
     gen_ai_compute_task: Optional[GenAiComputeTask] = None

     health: Optional[JobsHealthRules] = None
-    """An optional set of health rules that can be defined for this job."""

     libraries: Optional[List[compute.Library]] = None
     """An optional list of libraries to be installed on the cluster. The default value is an empty
@@ -8428,7 +7339,6 @@ class Task:
     gen_ai_compute_task: Optional[GenAiComputeTask] = None

     health: Optional[JobsHealthRules] = None
-    """An optional set of health rules that can be defined for this job."""

     job_cluster_key: Optional[str] = None
     """If job_cluster_key, this task is executed reusing the cluster specified in
@@ -8940,55 +7850,12 @@ class TerminationCodeCode(Enum):
 @dataclass
 class TerminationDetails:
     code: Optional[TerminationCodeCode] = None
-    """The code indicates why the run was terminated. Additional codes might be introduced in future
-    releases. * `SUCCESS`: The run was completed successfully. * `SUCCESS_WITH_FAILURES`: The run
-    was completed successfully but some child runs failed. * `USER_CANCELED`: The run was
-    successfully canceled during execution by a user. * `CANCELED`: The run was canceled during
-    execution by the Databricks platform; for example, if the maximum run duration was exceeded. *
-    `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency
-    type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The
-    run encountered an unexpected error. Refer to the state message for further details. *
-    `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. *
-    `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further
-    details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when
-    communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because
-    it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The
-    workspace has reached the quota for the maximum number of concurrent active runs. Consider
-    scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it
-    tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The
-    number of cluster creation, start, and upsize requests have exceeded the allotted rate limit.
-    Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run
-    failed due to an error when accessing the customer blob storage. Refer to the state message for
-    further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more
-    details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to
-    a permission issue while accessing a resource. Refer to the state message for further details. *
-    `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer
-    to the state message for further details. The causes might include, but are not limited to: The
-    provided library is invalid, there are insufficient permissions to install the library, and so
-    forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum
-    concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a
-    cluster that has already reached the maximum number of contexts it is configured to create. See:
-    [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to
-    the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an
-    invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The
-    run failed due to a cloud provider issue. Refer to the state message for further details. *
-    `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size
-    limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user.
-
-    [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now"""

     message: Optional[str] = None
     """A descriptive message with the termination details. This field is unstructured and the format
     might change."""

     type: Optional[TerminationTypeType] = None
-    """* `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An error occurred in the
-    Databricks platform. Please look at the [status page] or contact support if the issue persists.
-    * `CLIENT_ERROR`: The run was terminated because of an error caused by user input or the job
-    configuration. * `CLOUD_FAILURE`: The run was terminated because of an issue with your cloud
-    provider.
-
-    [status page]: https://status.databricks.com/"""

     def as_dict(self) -> dict:
         """Serializes the TerminationDetails into a dictionary suitable for use as a JSON request body."""
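Only the docstrings are dropped here; `TerminationDetails.code` and `.type` keep the enum semantics the removed text describes. A sketch of inspecting why a run ended, assuming a hypothetical run ID:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()
    status = w.jobs.get_run(run_id=456).status  # hypothetical run ID
    details = status.termination_details if status else None
    if details and details.code != jobs.TerminationCodeCode.SUCCESS:
        # `message` is unstructured and its format may change
        print(f"run ended with {details.code}: {details.message}")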
@@ -9154,10 +8021,13 @@ class TriggerType(Enum):
     through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a
     previously failed run. This occurs when you request to re-run the job in case of failures. *
     `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
-    Indicates a run that is triggered by a file arrival. *
-    triggered by a
-    restart a continuous job run.
+    Indicates a run that is triggered by a file arrival. * `CONTINUOUS`: Indicates a run that is
+    triggered by a continuous job. * `TABLE`: Indicates a run that is triggered by a table update. *
+    `CONTINUOUS_RESTART`: Indicates a run created by user to manually restart a continuous job run.
+    * `MODEL`: Indicates a run that is triggered by a model update."""

+    CONTINUOUS = "CONTINUOUS"
+    CONTINUOUS_RESTART = "CONTINUOUS_RESTART"
     FILE_ARRIVAL = "FILE_ARRIVAL"
     ONE_TIME = "ONE_TIME"
     PERIODIC = "PERIODIC"
@@ -9166,59 +8036,6 @@ class TriggerType(Enum):
     TABLE = "TABLE"


-@dataclass
-class UpdateJob:
-    job_id: int
-    """The canonical identifier of the job to update. This field is required."""
-
-    fields_to_remove: Optional[List[str]] = None
-    """Remove top-level fields in the job settings. Removing nested fields is not supported, except for
-    tasks and job clusters (`tasks/task_1`). This field is optional."""
-
-    new_settings: Optional[JobSettings] = None
-    """The new settings for the job.
-
-    Top-level fields specified in `new_settings` are completely replaced, except for arrays which
-    are merged. That is, new and existing entries are completely replaced based on the respective
-    key fields, i.e. `task_key` or `job_cluster_key`, while previous entries are kept.
-
-    Partially updating nested fields is not supported.
-
-    Changes to the field `JobSettings.timeout_seconds` are applied to active runs. Changes to other
-    fields are applied to future runs only."""
-
-    def as_dict(self) -> dict:
-        """Serializes the UpdateJob into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.fields_to_remove:
-            body["fields_to_remove"] = [v for v in self.fields_to_remove]
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        if self.new_settings:
-            body["new_settings"] = self.new_settings.as_dict()
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the UpdateJob into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.fields_to_remove:
-            body["fields_to_remove"] = self.fields_to_remove
-        if self.job_id is not None:
-            body["job_id"] = self.job_id
-        if self.new_settings:
-            body["new_settings"] = self.new_settings
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> UpdateJob:
-        """Deserializes the UpdateJob from a dictionary."""
-        return cls(
-            fields_to_remove=d.get("fields_to_remove", None),
-            job_id=d.get("job_id", None),
-            new_settings=_from_dict(d, "new_settings", JobSettings),
-        )
-
-
 @dataclass
 class UpdateResponse:
     def as_dict(self) -> dict:
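As with `SubmitRun`, the `UpdateJob` request dataclass disappears while the `jobs.update` method keeps the same parameters. A partial-update sketch; the job ID and removed field are hypothetical:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()
    w.jobs.update(
        job_id=123,  # hypothetical job ID
        new_settings=jobs.JobSettings(timeout_seconds=3600),  # timeout changes also apply to active runs
        fields_to_remove=["schedule"],  # drop a top-level field such as the cron schedule
    )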
@@ -9587,7 +8404,6 @@ class JobsAPI:
         Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are
         used, `git_source` must be defined on the job.
         :param health: :class:`JobsHealthRules` (optional)
-          An optional set of health rules that can be defined for this job.
         :param job_clusters: List[:class:`JobCluster`] (optional)
           A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries
           cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
@@ -9617,10 +8433,9 @@ class JobsAPI:
         :param queue: :class:`QueueSettings` (optional)
           The queue settings of the job.
         :param run_as: :class:`JobRunAs` (optional)
-
-          the
-
-          Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown.
+          The user or service principal that the job runs as, if specified in the request. This field
+          indicates the explicit configuration of `run_as` for the job. To find the value in all cases,
+          explicit or implicit, use `run_as_user_name`.
         :param schedule: :class:`CronSchedule` (optional)
           An optional periodic schedule for this job. The default behavior is that the job only runs when
           triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.
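The rewritten `run_as` text describes the field's read/write semantics rather than the old validation note, but the accepted shapes are unchanged: a `JobRunAs` with either `user_name` or `service_principal_name`. A hedged sketch of creating a job that runs as a service principal; the names and path are hypothetical:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()
    job = w.jobs.create(
        name="nightly-etl",
        run_as=jobs.JobRunAs(service_principal_name="sp-nightly-etl"),  # or user_name=...
        tasks=[
            jobs.Task(
                task_key="main",
                notebook_task=jobs.NotebookTask(notebook_path="/Repos/etl/main"),
            )
        ],
    )
    print(job.job_id)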
@@ -10515,7 +9330,6 @@ class JobsAPI:
         Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are
         used, `git_source` must be defined on the job.
         :param health: :class:`JobsHealthRules` (optional)
-          An optional set of health rules that can be defined for this job.
         :param idempotency_token: str (optional)
           An optional token that can be used to guarantee the idempotency of job run requests. If a run with
           the provided token already exists, the request does not create a new run but returns the ID of the