databricks-sdk 0.49.0__py3-none-any.whl → 0.51.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- databricks/sdk/__init__.py +20 -8
- databricks/sdk/config.py +2 -3
- databricks/sdk/credentials_provider.py +61 -15
- databricks/sdk/oidc_token_supplier.py +28 -0
- databricks/sdk/service/apps.py +8 -10
- databricks/sdk/service/billing.py +12 -3
- databricks/sdk/service/catalog.py +73 -4
- databricks/sdk/service/cleanrooms.py +9 -14
- databricks/sdk/service/compute.py +151 -7
- databricks/sdk/service/dashboards.py +253 -42
- databricks/sdk/service/jobs.py +602 -83
- databricks/sdk/service/ml.py +408 -72
- databricks/sdk/service/oauth2.py +8 -13
- databricks/sdk/service/pipelines.py +0 -32
- databricks/sdk/service/serving.py +26 -26
- databricks/sdk/service/settings.py +670 -113
- databricks/sdk/service/sql.py +881 -6
- databricks/sdk/service/vectorsearch.py +355 -159
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/METADATA +11 -11
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/RECORD +25 -24
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/WHEEL +1 -1
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/top_level.txt +0 -0
databricks/sdk/service/jobs.py
CHANGED
@@ -21,6 +21,12 @@ from databricks.sdk.service import compute
 # all definitions in this file are in alphabetical order
 
 
+class AuthenticationMethod(Enum):
+
+    OAUTH = "OAUTH"
+    PAT = "PAT"
+
+
 @dataclass
 class BaseJob:
     created_time: Optional[int] = None
@@ -37,9 +43,9 @@ class BaseJob:
     on accessible budget policies of the run_as identity on job creation or modification."""
 
     has_more: Optional[bool] = None
-    """Indicates if the job has more
-    can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2
-    requests with `expand_tasks=true`."""
+    """Indicates if the job has more array properties (`tasks`, `job_clusters`) that are not shown.
+    They can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2
+    :method:jobs/list requests with `expand_tasks=true`."""
 
     job_id: Optional[int] = None
     """The canonical identifier for this job."""
@@ -125,9 +131,13 @@ class BaseRun:
     """Description of the run"""
 
     effective_performance_target: Optional[PerformanceTarget] = None
-    """
-
-    the job
+    """The actual performance target used by the serverless run during execution. This can differ from
+    the client-set performance target on the request depending on whether the performance mode is
+    supported by the job type.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     end_time: Optional[int] = None
     """The time at which this run ended in epoch milliseconds (milliseconds since 1/1/1970 UTC). This
@@ -151,8 +161,8 @@ class BaseRun:
     are used, `git_source` must be defined on the job."""
 
     has_more: Optional[bool] = None
-    """Indicates if the run has more
-    can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
+    """Indicates if the run has more array properties (`tasks`, `job_clusters`) that are not shown.
+    They can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
     :method:jobs/listruns requests with `expand_tasks=true`."""
 
     job_clusters: Optional[List[JobCluster]] = None
@@ -566,10 +576,11 @@ class CleanRoomTaskRunState:
 
     life_cycle_state: Optional[CleanRoomTaskRunLifeCycleState] = None
     """A value indicating the run's current lifecycle state. This field is always available in the
-    response."""
+    response. Note: Additional states might be introduced in future releases."""
 
     result_state: Optional[CleanRoomTaskRunResultState] = None
-    """A value indicating the run's result. This field is only available for terminal lifecycle states."""
+    """A value indicating the run's result. This field is only available for terminal lifecycle states.
+    Note: Additional states might be introduced in future releases."""
 
     def as_dict(self) -> dict:
         """Serializes the CleanRoomTaskRunState into a dictionary suitable for use as a JSON request body."""
@@ -793,8 +804,6 @@ class ClusterSpec:
 
 @dataclass
 class ComputeConfig:
-    """Next field: 4"""
-
     num_gpus: int
     """Number of GPUs."""
 
@@ -992,8 +1001,7 @@ class CreateJob:
     job_clusters: Optional[List[JobCluster]] = None
     """A list of job cluster specifications that can be shared and reused by tasks of this job.
     Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
-    task settings.
-    :method:jobs/get."""
+    task settings."""
 
     max_concurrent_runs: Optional[int] = None
     """An optional maximum allowed number of concurrent runs of the job. Set this value if you want to
@@ -1016,8 +1024,12 @@ class CreateJob:
     """Job-level parameter definitions"""
 
     performance_target: Optional[PerformanceTarget] = None
-    """
-
+    """The performance mode on a serverless job. This field determines the level of compute performance
+    or cost-efficiency for the run.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     queue: Optional[QueueSettings] = None
     """The queue settings of the job."""
@@ -1038,9 +1050,11 @@ class CreateJob:
     be added to the job."""
 
     tasks: Optional[List[Task]] = None
-    """A list of task specifications to be executed by this job.
-
-
+    """A list of task specifications to be executed by this job. It supports up to 1000 elements in
+    write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update,
+    :method:jobs/submit). Read endpoints return only 100 tasks. If more than 100 tasks are
+    available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field
+    at the object root to determine if more results are available."""
 
     timeout_seconds: Optional[int] = None
     """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
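For reference, a minimal sketch of the task pagination the new docstring describes, assuming an authenticated WorkspaceClient, a job with more than 100 tasks, and that jobs.get accepts the page_token the docstring refers to; the job ID is hypothetical:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    job = w.jobs.get(job_id=123)  # hypothetical job ID
    tasks = list(job.settings.tasks or [])
    # Read endpoints return at most 100 tasks; follow next_page_token for the rest.
    while job.next_page_token:
        job = w.jobs.get(job_id=123, page_token=job.next_page_token)
        tasks.extend(job.settings.tasks or [])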
@@ -1271,6 +1285,110 @@ class CronSchedule:
     )
 
 
+@dataclass
+class DashboardPageSnapshot:
+    page_display_name: Optional[str] = None
+
+    widget_error_details: Optional[List[WidgetErrorDetail]] = None
+
+    def as_dict(self) -> dict:
+        """Serializes the DashboardPageSnapshot into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.page_display_name is not None:
+            body["page_display_name"] = self.page_display_name
+        if self.widget_error_details:
+            body["widget_error_details"] = [v.as_dict() for v in self.widget_error_details]
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DashboardPageSnapshot into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.page_display_name is not None:
+            body["page_display_name"] = self.page_display_name
+        if self.widget_error_details:
+            body["widget_error_details"] = self.widget_error_details
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DashboardPageSnapshot:
+        """Deserializes the DashboardPageSnapshot from a dictionary."""
+        return cls(
+            page_display_name=d.get("page_display_name", None),
+            widget_error_details=_repeated_dict(d, "widget_error_details", WidgetErrorDetail),
+        )
+
+
+@dataclass
+class DashboardTask:
+    """Configures the Lakeview Dashboard job task type."""
+
+    dashboard_id: Optional[str] = None
+    """The identifier of the dashboard to refresh."""
+
+    subscription: Optional[Subscription] = None
+    """Optional: subscription configuration for sending the dashboard snapshot."""
+
+    warehouse_id: Optional[str] = None
+    """Optional: The warehouse id to execute the dashboard with for the schedule. If not specified, the
+    default warehouse of the dashboard will be used."""
+
+    def as_dict(self) -> dict:
+        """Serializes the DashboardTask into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.dashboard_id is not None:
+            body["dashboard_id"] = self.dashboard_id
+        if self.subscription:
+            body["subscription"] = self.subscription.as_dict()
+        if self.warehouse_id is not None:
+            body["warehouse_id"] = self.warehouse_id
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DashboardTask into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.dashboard_id is not None:
+            body["dashboard_id"] = self.dashboard_id
+        if self.subscription:
+            body["subscription"] = self.subscription
+        if self.warehouse_id is not None:
+            body["warehouse_id"] = self.warehouse_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DashboardTask:
+        """Deserializes the DashboardTask from a dictionary."""
+        return cls(
+            dashboard_id=d.get("dashboard_id", None),
+            subscription=_from_dict(d, "subscription", Subscription),
+            warehouse_id=d.get("warehouse_id", None),
+        )
+
+
+@dataclass
+class DashboardTaskOutput:
+    page_snapshots: Optional[List[DashboardPageSnapshot]] = None
+    """Should only be populated for manual PDF download jobs."""
+
+    def as_dict(self) -> dict:
+        """Serializes the DashboardTaskOutput into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.page_snapshots:
+            body["page_snapshots"] = [v.as_dict() for v in self.page_snapshots]
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DashboardTaskOutput into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.page_snapshots:
+            body["page_snapshots"] = self.page_snapshots
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DashboardTaskOutput:
+        """Deserializes the DashboardTaskOutput from a dictionary."""
+        return cls(page_snapshots=_repeated_dict(d, "page_snapshots", DashboardPageSnapshot))
+
+
 @dataclass
 class DbtOutput:
     artifacts_headers: Optional[Dict[str, str]] = None
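The hunk above introduces the three dashboard task types. A short sketch of how they compose and serialize, using only classes added in this diff (Subscription and SubscriptionSubscriber appear later in the file); all IDs are hypothetical:

    from databricks.sdk.service.jobs import DashboardTask, Subscription, SubscriptionSubscriber

    task = DashboardTask(
        dashboard_id="abc123",
        warehouse_id="wh-456",
        subscription=Subscription(
            custom_subject="Nightly sales snapshot",
            subscribers=[SubscriptionSubscriber(user_name="analyst@example.com")],
        ),
    )
    body = task.as_dict()  # nested dataclasses serialize recursively
    assert DashboardTask.from_dict(body).dashboard_id == "abc123"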
@@ -1881,8 +1999,6 @@ class Format(Enum):
 
 @dataclass
 class GenAiComputeTask:
-    """Next field: 9"""
-
     dl_runtime_image: str
     """Runtime image"""
 
@@ -1890,7 +2006,6 @@ class GenAiComputeTask:
     """Command launcher to run the actual script, e.g. bash, python etc."""
 
     compute: Optional[ComputeConfig] = None
-    """Next field: 4"""
 
     mlflow_experiment_name: Optional[str] = None
     """Optional string containing the name of the MLflow experiment to log the run to. If name is not
@@ -2183,15 +2298,15 @@ class Job:
     on accessible budget policies of the run_as identity on job creation or modification."""
 
     has_more: Optional[bool] = None
-    """Indicates if the job has more
-    can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2
-    requests with `expand_tasks=true`."""
+    """Indicates if the job has more array properties (`tasks`, `job_clusters`) that are not shown.
+    They can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2
+    :method:jobs/list requests with `expand_tasks=true`."""
 
     job_id: Optional[int] = None
     """The canonical identifier for this job."""
 
     next_page_token: Optional[str] = None
-    """A token that can be used to list the next page of
+    """A token that can be used to list the next page of array properties."""
 
     run_as_user_name: Optional[str] = None
     """The email of an active workspace user or the application ID of a service principal that the job
@@ -2592,8 +2707,11 @@ class JobEnvironment:
     """The key of an environment. It has to be unique within a job."""
 
     spec: Optional[compute.Environment] = None
-    """The environment entity used to preserve serverless environment side panel
-
+    """The environment entity used to preserve serverless environment side panel, jobs' environment for
+    non-notebook task, and DLT's environment for classic and serverless pipelines. (Note: DLT uses a
+    copied version of the Environment proto below, at
+    //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In this minimal
+    environment spec, only pip dependencies are supported."""
 
     def as_dict(self) -> dict:
         """Serializes the JobEnvironment into a dictionary suitable for use as a JSON request body."""
@@ -2977,8 +3095,7 @@ class JobSettings:
     job_clusters: Optional[List[JobCluster]] = None
     """A list of job cluster specifications that can be shared and reused by tasks of this job.
     Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
-    task settings.
-    :method:jobs/get."""
+    task settings."""
 
     max_concurrent_runs: Optional[int] = None
     """An optional maximum allowed number of concurrent runs of the job. Set this value if you want to
@@ -3001,8 +3118,12 @@ class JobSettings:
     """Job-level parameter definitions"""
 
     performance_target: Optional[PerformanceTarget] = None
-    """
-
+    """The performance mode on a serverless job. This field determines the level of compute performance
+    or cost-efficiency for the run.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     queue: Optional[QueueSettings] = None
     """The queue settings of the job."""
@@ -3023,9 +3144,11 @@ class JobSettings:
     be added to the job."""
 
     tasks: Optional[List[Task]] = None
-    """A list of task specifications to be executed by this job.
-
-
+    """A list of task specifications to be executed by this job. It supports up to 1000 elements in
+    write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update,
+    :method:jobs/submit). Read endpoints return only 100 tasks. If more than 100 tasks are
+    available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field
+    at the object root to determine if more results are available."""
 
     timeout_seconds: Optional[int] = None
     """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
@@ -3659,9 +3782,8 @@ class PerformanceTarget(Enum):
     on serverless compute should be. The performance mode on the job or pipeline should map to a
     performance setting that is passed to Cluster Manager (see cluster-common PerformanceTarget)."""
 
-    BALANCED = "BALANCED"
-    COST_OPTIMIZED = "COST_OPTIMIZED"
     PERFORMANCE_OPTIMIZED = "PERFORMANCE_OPTIMIZED"
+    STANDARD = "STANDARD"
 
 
 @dataclass
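Note the compatibility impact of this hunk: BALANCED and COST_OPTIMIZED are gone, so 0.49.0 code referencing PerformanceTarget.BALANCED or PerformanceTarget.COST_OPTIMIZED raises AttributeError under 0.51.0. A sketch of selecting the surviving modes at job creation, assuming the create method forwards the performance_target field defined on CreateJob above; the task details are hypothetical:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()
    created = w.jobs.create(
        name="nightly-etl",
        performance_target=jobs.PerformanceTarget.STANDARD,  # or PERFORMANCE_OPTIMIZED
        tasks=[
            jobs.Task(
                task_key="main",
                notebook_task=jobs.NotebookTask(notebook_path="/Workspace/etl/main"),
            )
        ],
    )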
@@ -3760,6 +3882,175 @@ class PipelineTask:
         return cls(full_refresh=d.get("full_refresh", None), pipeline_id=d.get("pipeline_id", None))
 
 
+@dataclass
+class PowerBiModel:
+    authentication_method: Optional[AuthenticationMethod] = None
+    """How the published Power BI model authenticates to Databricks"""
+
+    model_name: Optional[str] = None
+    """The name of the Power BI model"""
+
+    overwrite_existing: Optional[bool] = None
+    """Whether to overwrite existing Power BI models"""
+
+    storage_mode: Optional[StorageMode] = None
+    """The default storage mode of the Power BI model"""
+
+    workspace_name: Optional[str] = None
+    """The name of the Power BI workspace of the model"""
+
+    def as_dict(self) -> dict:
+        """Serializes the PowerBiModel into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.authentication_method is not None:
+            body["authentication_method"] = self.authentication_method.value
+        if self.model_name is not None:
+            body["model_name"] = self.model_name
+        if self.overwrite_existing is not None:
+            body["overwrite_existing"] = self.overwrite_existing
+        if self.storage_mode is not None:
+            body["storage_mode"] = self.storage_mode.value
+        if self.workspace_name is not None:
+            body["workspace_name"] = self.workspace_name
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the PowerBiModel into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.authentication_method is not None:
+            body["authentication_method"] = self.authentication_method
+        if self.model_name is not None:
+            body["model_name"] = self.model_name
+        if self.overwrite_existing is not None:
+            body["overwrite_existing"] = self.overwrite_existing
+        if self.storage_mode is not None:
+            body["storage_mode"] = self.storage_mode
+        if self.workspace_name is not None:
+            body["workspace_name"] = self.workspace_name
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> PowerBiModel:
+        """Deserializes the PowerBiModel from a dictionary."""
+        return cls(
+            authentication_method=_enum(d, "authentication_method", AuthenticationMethod),
+            model_name=d.get("model_name", None),
+            overwrite_existing=d.get("overwrite_existing", None),
+            storage_mode=_enum(d, "storage_mode", StorageMode),
+            workspace_name=d.get("workspace_name", None),
+        )
+
+
+@dataclass
+class PowerBiTable:
+    catalog: Optional[str] = None
+    """The catalog name in Databricks"""
+
+    name: Optional[str] = None
+    """The table name in Databricks"""
+
+    schema: Optional[str] = None
+    """The schema name in Databricks"""
+
+    storage_mode: Optional[StorageMode] = None
+    """The Power BI storage mode of the table"""
+
+    def as_dict(self) -> dict:
+        """Serializes the PowerBiTable into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.catalog is not None:
+            body["catalog"] = self.catalog
+        if self.name is not None:
+            body["name"] = self.name
+        if self.schema is not None:
+            body["schema"] = self.schema
+        if self.storage_mode is not None:
+            body["storage_mode"] = self.storage_mode.value
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the PowerBiTable into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.catalog is not None:
+            body["catalog"] = self.catalog
+        if self.name is not None:
+            body["name"] = self.name
+        if self.schema is not None:
+            body["schema"] = self.schema
+        if self.storage_mode is not None:
+            body["storage_mode"] = self.storage_mode
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> PowerBiTable:
+        """Deserializes the PowerBiTable from a dictionary."""
+        return cls(
+            catalog=d.get("catalog", None),
+            name=d.get("name", None),
+            schema=d.get("schema", None),
+            storage_mode=_enum(d, "storage_mode", StorageMode),
+        )
+
+
+@dataclass
+class PowerBiTask:
+    connection_resource_name: Optional[str] = None
+    """The resource name of the UC connection to authenticate from Databricks to Power BI"""
+
+    power_bi_model: Optional[PowerBiModel] = None
+    """The semantic model to update"""
+
+    refresh_after_update: Optional[bool] = None
+    """Whether the model should be refreshed after the update"""
+
+    tables: Optional[List[PowerBiTable]] = None
+    """The tables to be exported to Power BI"""
+
+    warehouse_id: Optional[str] = None
+    """The SQL warehouse ID to use as the Power BI data source"""
+
+    def as_dict(self) -> dict:
+        """Serializes the PowerBiTask into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.connection_resource_name is not None:
+            body["connection_resource_name"] = self.connection_resource_name
+        if self.power_bi_model:
+            body["power_bi_model"] = self.power_bi_model.as_dict()
+        if self.refresh_after_update is not None:
+            body["refresh_after_update"] = self.refresh_after_update
+        if self.tables:
+            body["tables"] = [v.as_dict() for v in self.tables]
+        if self.warehouse_id is not None:
+            body["warehouse_id"] = self.warehouse_id
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the PowerBiTask into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.connection_resource_name is not None:
+            body["connection_resource_name"] = self.connection_resource_name
+        if self.power_bi_model:
+            body["power_bi_model"] = self.power_bi_model
+        if self.refresh_after_update is not None:
+            body["refresh_after_update"] = self.refresh_after_update
+        if self.tables:
+            body["tables"] = self.tables
+        if self.warehouse_id is not None:
+            body["warehouse_id"] = self.warehouse_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> PowerBiTask:
+        """Deserializes the PowerBiTask from a dictionary."""
+        return cls(
+            connection_resource_name=d.get("connection_resource_name", None),
+            power_bi_model=_from_dict(d, "power_bi_model", PowerBiModel),
+            refresh_after_update=d.get("refresh_after_update", None),
+            tables=_repeated_dict(d, "tables", PowerBiTable),
+            warehouse_id=d.get("warehouse_id", None),
+        )
+
+
 @dataclass
 class PythonWheelTask:
     package_name: str
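A minimal sketch assembling the new Power BI task types from this hunk; the connection, warehouse, and naming values are hypothetical, and StorageMode and AuthenticationMethod are the enums added elsewhere in this diff:

    from databricks.sdk.service.jobs import (
        AuthenticationMethod,
        PowerBiModel,
        PowerBiTable,
        PowerBiTask,
        StorageMode,
    )

    power_bi = PowerBiTask(
        connection_resource_name="pbi-connection",  # hypothetical UC connection
        warehouse_id="wh-456",
        refresh_after_update=True,
        power_bi_model=PowerBiModel(
            workspace_name="Sales",
            model_name="sales_model",
            authentication_method=AuthenticationMethod.OAUTH,
            storage_mode=StorageMode.DIRECT_QUERY,
            overwrite_existing=True,
        ),
        tables=[PowerBiTable(catalog="main", schema="sales", name="orders")],
    )
    # Enum fields serialize to their string values in the JSON body.
    assert power_bi.as_dict()["power_bi_model"]["storage_mode"] == "DIRECT_QUERY"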
@@ -3890,6 +4181,15 @@ class QueueSettings:
 
 @dataclass
 class RepairHistoryItem:
+    effective_performance_target: Optional[PerformanceTarget] = None
+    """The actual performance target used by the serverless run during execution. This can differ from
+    the client-set performance target on the request depending on whether the performance mode is
+    supported by the job type.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
+
     end_time: Optional[int] = None
     """The end time of the (repaired) run."""
 
@@ -3914,6 +4214,8 @@ class RepairHistoryItem:
     def as_dict(self) -> dict:
         """Serializes the RepairHistoryItem into a dictionary suitable for use as a JSON request body."""
         body = {}
+        if self.effective_performance_target is not None:
+            body["effective_performance_target"] = self.effective_performance_target.value
         if self.end_time is not None:
             body["end_time"] = self.end_time
         if self.id is not None:
@@ -3933,6 +4235,8 @@ class RepairHistoryItem:
     def as_shallow_dict(self) -> dict:
         """Serializes the RepairHistoryItem into a shallow dictionary of its immediate attributes."""
         body = {}
+        if self.effective_performance_target is not None:
+            body["effective_performance_target"] = self.effective_performance_target
         if self.end_time is not None:
             body["end_time"] = self.end_time
         if self.id is not None:
@@ -3953,6 +4257,7 @@ class RepairHistoryItem:
     def from_dict(cls, d: Dict[str, Any]) -> RepairHistoryItem:
         """Deserializes the RepairHistoryItem from a dictionary."""
         return cls(
+            effective_performance_target=_enum(d, "effective_performance_target", PerformanceTarget),
            end_time=d.get("end_time", None),
             id=d.get("id", None),
             start_time=d.get("start_time", None),
@@ -4014,6 +4319,15 @@ class RepairRun:
     [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
     [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html"""
 
+    performance_target: Optional[PerformanceTarget] = None
+    """The performance mode on a serverless job. The performance target determines the level of compute
+    performance or cost-efficiency for the run. This field overrides the performance target defined
+    on the job level.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
+
     pipeline_params: Optional[PipelineParams] = None
     """Controls whether the pipeline should perform a full refresh"""
 
@@ -4080,6 +4394,8 @@ class RepairRun:
             body["latest_repair_id"] = self.latest_repair_id
         if self.notebook_params:
             body["notebook_params"] = self.notebook_params
+        if self.performance_target is not None:
+            body["performance_target"] = self.performance_target.value
         if self.pipeline_params:
             body["pipeline_params"] = self.pipeline_params.as_dict()
         if self.python_named_params:
@@ -4113,6 +4429,8 @@ class RepairRun:
             body["latest_repair_id"] = self.latest_repair_id
         if self.notebook_params:
             body["notebook_params"] = self.notebook_params
+        if self.performance_target is not None:
+            body["performance_target"] = self.performance_target
         if self.pipeline_params:
             body["pipeline_params"] = self.pipeline_params
         if self.python_named_params:
@@ -4142,6 +4460,7 @@ class RepairRun:
             job_parameters=d.get("job_parameters", None),
             latest_repair_id=d.get("latest_repair_id", None),
             notebook_params=d.get("notebook_params", None),
+            performance_target=_enum(d, "performance_target", PerformanceTarget),
             pipeline_params=_from_dict(d, "pipeline_params", PipelineParams),
             python_named_params=d.get("python_named_params", None),
             python_params=d.get("python_params", None),
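A sketch of overriding the performance mode when repairing a run, assuming JobsAPI.repair_run exposes the new RepairRun.performance_target field as a keyword argument; the run ID is hypothetical:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.jobs import PerformanceTarget

    w = WorkspaceClient()
    w.jobs.repair_run(
        run_id=456,  # hypothetical run ID
        rerun_all_failed_tasks=True,
        performance_target=PerformanceTarget.PERFORMANCE_OPTIMIZED,  # overrides the job-level setting
    )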
@@ -4542,9 +4861,13 @@ class Run:
     """Description of the run"""
 
     effective_performance_target: Optional[PerformanceTarget] = None
-    """
-
-    the job
+    """The actual performance target used by the serverless run during execution. This can differ from
+    the client-set performance target on the request depending on whether the performance mode is
+    supported by the job type.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     end_time: Optional[int] = None
     """The time at which this run ended in epoch milliseconds (milliseconds since 1/1/1970 UTC). This
@@ -4568,8 +4891,8 @@ class Run:
     are used, `git_source` must be defined on the job."""
 
     has_more: Optional[bool] = None
-    """Indicates if the run has more
-    can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
+    """Indicates if the run has more array properties (`tasks`, `job_clusters`) that are not shown.
+    They can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
     :method:jobs/listruns requests with `expand_tasks=true`."""
 
     iterations: Optional[List[RunTask]] = None
@@ -4593,7 +4916,7 @@ class Run:
     that the task run belongs to."""
 
     next_page_token: Optional[str] = None
-    """A token that can be used to list the next page of
+    """A token that can be used to list the next page of array properties."""
 
     number_in_job: Optional[int] = None
     """A unique identifier for this job run. This is set to the same value as `run_id`."""
@@ -5266,9 +5589,13 @@ class RunNow:
     job will be run."""
 
     performance_target: Optional[PerformanceTarget] = None
-    """
-
-    job
+    """The performance mode on a serverless job. The performance target determines the level of compute
+    performance or cost-efficiency for the run. This field overrides the performance target defined
+    on the job level.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     pipeline_params: Optional[PipelineParams] = None
     """Controls whether the pipeline should perform a full refresh"""
@@ -5443,6 +5770,9 @@ class RunOutput:
     clean_rooms_notebook_output: Optional[CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput] = None
     """The output of a clean rooms notebook task, if available"""
 
+    dashboard_output: Optional[DashboardTaskOutput] = None
+    """The output of a dashboard task, if available"""
+
     dbt_output: Optional[DbtOutput] = None
     """The output of a dbt task, if available."""
 
@@ -5489,6 +5819,8 @@ class RunOutput:
         body = {}
         if self.clean_rooms_notebook_output:
             body["clean_rooms_notebook_output"] = self.clean_rooms_notebook_output.as_dict()
+        if self.dashboard_output:
+            body["dashboard_output"] = self.dashboard_output.as_dict()
         if self.dbt_output:
             body["dbt_output"] = self.dbt_output.as_dict()
         if self.error is not None:
@@ -5516,6 +5848,8 @@ class RunOutput:
         body = {}
         if self.clean_rooms_notebook_output:
             body["clean_rooms_notebook_output"] = self.clean_rooms_notebook_output
+        if self.dashboard_output:
+            body["dashboard_output"] = self.dashboard_output
         if self.dbt_output:
             body["dbt_output"] = self.dbt_output
         if self.error is not None:
@@ -5545,6 +5879,7 @@ class RunOutput:
             clean_rooms_notebook_output=_from_dict(
                 d, "clean_rooms_notebook_output", CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput
             ),
+            dashboard_output=_from_dict(d, "dashboard_output", DashboardTaskOutput),
             dbt_output=_from_dict(d, "dbt_output", DbtOutput),
             error=d.get("error", None),
             error_trace=d.get("error_trace", None),
@@ -5720,13 +6055,14 @@ class RunState:
 
     life_cycle_state: Optional[RunLifeCycleState] = None
     """A value indicating the run's current lifecycle state. This field is always available in the
-    response."""
+    response. Note: Additional states might be introduced in future releases."""
 
     queue_reason: Optional[str] = None
     """The reason indicating why the run was queued."""
 
     result_state: Optional[RunResultState] = None
-    """A value indicating the run's result. This field is only available for terminal lifecycle states."""
+    """A value indicating the run's result. This field is only available for terminal lifecycle states.
+    Note: Additional states might be introduced in future releases."""
 
     state_message: Optional[str] = None
     """A descriptive message for the current state. This field is unstructured, and its exact format is
@@ -5860,6 +6196,9 @@ class RunTask:
     `condition_task` field is present. The condition task does not require a cluster to execute and
     does not support retries or notifications."""
 
+    dashboard_task: Optional[DashboardTask] = None
+    """The task refreshes a dashboard and sends a snapshot to subscribers."""
+
     dbt_task: Optional[DbtTask] = None
     """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
     requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
@@ -5873,13 +6212,16 @@ class RunTask:
     """An optional description for this task."""
 
     disabled: Optional[bool] = None
-    """
-    immediately skipped as soon as they are unblocked."""
+    """Deprecated, field was never used in production."""
 
     effective_performance_target: Optional[PerformanceTarget] = None
-    """
-
-    the job
+    """The actual performance target used by the serverless run during execution. This can differ from
+    the client-set performance target on the request depending on whether the performance mode is
+    supported by the job type.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     email_notifications: Optional[JobEmailNotifications] = None
     """An optional set of email addresses notified when the task run begins or completes. The default
@@ -5910,7 +6252,6 @@ class RunTask:
     present."""
 
     gen_ai_compute_task: Optional[GenAiComputeTask] = None
-    """Next field: 9"""
 
     git_source: Optional[GitSource] = None
     """An optional specification for a remote Git repository containing the source code used by tasks.
@@ -5942,6 +6283,9 @@ class RunTask:
     """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
     configured to use triggered more are supported."""
 
+    power_bi_task: Optional[PowerBiTask] = None
+    """The task triggers a Power BI semantic model update when the `power_bi_task` field is present."""
+
     python_wheel_task: Optional[PythonWheelTask] = None
     """The task runs a Python wheel when the `python_wheel_task` field is present."""
 
@@ -6033,6 +6377,8 @@ class RunTask:
             body["cluster_instance"] = self.cluster_instance.as_dict()
         if self.condition_task:
             body["condition_task"] = self.condition_task.as_dict()
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task.as_dict()
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task.as_dict()
         if self.depends_on:
@@ -6071,6 +6417,8 @@ class RunTask:
             body["notification_settings"] = self.notification_settings.as_dict()
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task.as_dict()
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task.as_dict()
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task.as_dict()
         if self.queue_duration is not None:
@@ -6124,6 +6472,8 @@ class RunTask:
             body["cluster_instance"] = self.cluster_instance
         if self.condition_task:
             body["condition_task"] = self.condition_task
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task
         if self.depends_on:
@@ -6162,6 +6512,8 @@ class RunTask:
             body["notification_settings"] = self.notification_settings
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task
         if self.queue_duration is not None:
@@ -6211,6 +6563,7 @@ class RunTask:
             cleanup_duration=d.get("cleanup_duration", None),
             cluster_instance=_from_dict(d, "cluster_instance", ClusterInstance),
             condition_task=_from_dict(d, "condition_task", RunConditionTask),
+            dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
             dbt_task=_from_dict(d, "dbt_task", DbtTask),
             depends_on=_repeated_dict(d, "depends_on", TaskDependency),
             description=d.get("description", None),
@@ -6230,6 +6583,7 @@ class RunTask:
             notebook_task=_from_dict(d, "notebook_task", NotebookTask),
             notification_settings=_from_dict(d, "notification_settings", TaskNotificationSettings),
             pipeline_task=_from_dict(d, "pipeline_task", PipelineTask),
+            power_bi_task=_from_dict(d, "power_bi_task", PowerBiTask),
             python_wheel_task=_from_dict(d, "python_wheel_task", PythonWheelTask),
             queue_duration=d.get("queue_duration", None),
             resolved_values=_from_dict(d, "resolved_values", ResolvedValues),
@@ -7028,6 +7382,13 @@ class SqlTaskSubscription:
         return cls(destination_id=d.get("destination_id", None), user_name=d.get("user_name", None))
 
 
+class StorageMode(Enum):
+
+    DIRECT_QUERY = "DIRECT_QUERY"
+    DUAL = "DUAL"
+    IMPORT = "IMPORT"
+
+
 @dataclass
 class SubmitRun:
     access_control_list: Optional[List[JobAccessControlRequest]] = None
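A small sketch of how the new StorageMode enum round-trips through the generated from_dict/as_dict helpers shown earlier in this diff:

    from databricks.sdk.service.jobs import PowerBiTable, StorageMode

    table = PowerBiTable.from_dict(
        {"catalog": "main", "schema": "sales", "name": "orders", "storage_mode": "DUAL"}
    )
    assert table.storage_mode is StorageMode.DUAL  # _enum maps the wire string to the member
    assert table.as_dict()["storage_mode"] == "DUAL"  # .value restores the wire string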
@@ -7223,6 +7584,9 @@ class SubmitTask:
     `condition_task` field is present. The condition task does not require a cluster to execute and
     does not support retries or notifications."""
 
+    dashboard_task: Optional[DashboardTask] = None
+    """The task refreshes a dashboard and sends a snapshot to subscribers."""
+
     dbt_task: Optional[DbtTask] = None
     """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
     requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
@@ -7253,7 +7617,6 @@ class SubmitTask:
     present."""
 
     gen_ai_compute_task: Optional[GenAiComputeTask] = None
-    """Next field: 9"""
 
     health: Optional[JobsHealthRules] = None
     """An optional set of health rules that can be defined for this job."""
@@ -7276,6 +7639,9 @@ class SubmitTask:
     """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
     configured to use triggered more are supported."""
 
+    power_bi_task: Optional[PowerBiTask] = None
+    """The task triggers a Power BI semantic model update when the `power_bi_task` field is present."""
+
     python_wheel_task: Optional[PythonWheelTask] = None
     """The task runs a Python wheel when the `python_wheel_task` field is present."""
 
@@ -7329,6 +7695,8 @@ class SubmitTask:
             body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task.as_dict()
         if self.condition_task:
             body["condition_task"] = self.condition_task.as_dict()
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task.as_dict()
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task.as_dict()
         if self.depends_on:
@@ -7357,6 +7725,8 @@ class SubmitTask:
             body["notification_settings"] = self.notification_settings.as_dict()
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task.as_dict()
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task.as_dict()
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task.as_dict()
         if self.run_if is not None:
@@ -7386,6 +7756,8 @@ class SubmitTask:
             body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task
         if self.condition_task:
             body["condition_task"] = self.condition_task
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task
         if self.depends_on:
@@ -7414,6 +7786,8 @@ class SubmitTask:
             body["notification_settings"] = self.notification_settings
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task
         if self.run_if is not None:
@@ -7442,6 +7816,7 @@ class SubmitTask:
         return cls(
             clean_rooms_notebook_task=_from_dict(d, "clean_rooms_notebook_task", CleanRoomsNotebookTask),
             condition_task=_from_dict(d, "condition_task", ConditionTask),
+            dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
             dbt_task=_from_dict(d, "dbt_task", DbtTask),
             depends_on=_repeated_dict(d, "depends_on", TaskDependency),
             description=d.get("description", None),
@@ -7456,6 +7831,7 @@ class SubmitTask:
             notebook_task=_from_dict(d, "notebook_task", NotebookTask),
             notification_settings=_from_dict(d, "notification_settings", TaskNotificationSettings),
             pipeline_task=_from_dict(d, "pipeline_task", PipelineTask),
+            power_bi_task=_from_dict(d, "power_bi_task", PowerBiTask),
             python_wheel_task=_from_dict(d, "python_wheel_task", PythonWheelTask),
             run_if=_enum(d, "run_if", RunIf),
             run_job_task=_from_dict(d, "run_job_task", RunJobTask),
@@ -7469,6 +7845,83 @@ class SubmitTask:
         )
 
 
+@dataclass
+class Subscription:
+    custom_subject: Optional[str] = None
+    """Optional: Allows users to specify a custom subject line on the email sent to subscribers."""
+
+    paused: Optional[bool] = None
+    """When true, the subscription will not send emails."""
+
+    subscribers: Optional[List[SubscriptionSubscriber]] = None
+    """The list of subscribers to send the snapshot of the dashboard to."""
+
+    def as_dict(self) -> dict:
+        """Serializes the Subscription into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.custom_subject is not None:
+            body["custom_subject"] = self.custom_subject
+        if self.paused is not None:
+            body["paused"] = self.paused
+        if self.subscribers:
+            body["subscribers"] = [v.as_dict() for v in self.subscribers]
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the Subscription into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.custom_subject is not None:
+            body["custom_subject"] = self.custom_subject
+        if self.paused is not None:
+            body["paused"] = self.paused
+        if self.subscribers:
+            body["subscribers"] = self.subscribers
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> Subscription:
+        """Deserializes the Subscription from a dictionary."""
+        return cls(
+            custom_subject=d.get("custom_subject", None),
+            paused=d.get("paused", None),
+            subscribers=_repeated_dict(d, "subscribers", SubscriptionSubscriber),
+        )
+
+
+@dataclass
+class SubscriptionSubscriber:
+    destination_id: Optional[str] = None
+    """A snapshot of the dashboard will be sent to the destination when the `destination_id` field is
+    present."""
+
+    user_name: Optional[str] = None
+    """A snapshot of the dashboard will be sent to the user's email when the `user_name` field is
+    present."""
+
+    def as_dict(self) -> dict:
+        """Serializes the SubscriptionSubscriber into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.destination_id is not None:
+            body["destination_id"] = self.destination_id
+        if self.user_name is not None:
+            body["user_name"] = self.user_name
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the SubscriptionSubscriber into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.destination_id is not None:
+            body["destination_id"] = self.destination_id
+        if self.user_name is not None:
+            body["user_name"] = self.user_name
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> SubscriptionSubscriber:
+        """Deserializes the SubscriptionSubscriber from a dictionary."""
+        return cls(destination_id=d.get("destination_id", None), user_name=d.get("user_name", None))
+
+
 @dataclass
 class TableUpdateTriggerConfiguration:
     condition: Optional[Condition] = None
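A sketch of a one-time run that exercises the new dashboard task type through JobsAPI.submit, assuming submit accepts SubmitTask values as defined above; the IDs are hypothetical:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.jobs import DashboardTask, SubmitTask

    w = WorkspaceClient()
    run = w.jobs.submit(
        run_name="dashboard-refresh",
        tasks=[
            SubmitTask(
                task_key="refresh",
                dashboard_task=DashboardTask(dashboard_id="abc123", warehouse_id="wh-456"),
            )
        ],
    ).result()  # blocks until the one-time run finishes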
@@ -7541,6 +7994,9 @@ class Task:
|
|
|
7541
7994
|
`condition_task` field is present. The condition task does not require a cluster to execute and
|
|
7542
7995
|
does not support retries or notifications."""
|
|
7543
7996
|
|
|
7997
|
+
dashboard_task: Optional[DashboardTask] = None
|
|
7998
|
+
"""The task refreshes a dashboard and sends a snapshot to subscribers."""
|
|
7999
|
+
|
|
7544
8000
|
dbt_task: Optional[DbtTask] = None
|
|
7545
8001
|
"""The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
|
|
7546
8002
|
requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
|
|
@@ -7575,7 +8031,6 @@ class Task:
|
|
|
7575
8031
|
present."""
|
|
7576
8032
|
|
|
7577
8033
|
gen_ai_compute_task: Optional[GenAiComputeTask] = None
|
|
7578
|
-
"""Next field: 9"""
|
|
7579
8034
|
|
|
7580
8035
|
health: Optional[JobsHealthRules] = None
|
|
7581
8036
|
"""An optional set of health rules that can be defined for this job."""
|
|
@@ -7612,6 +8067,9 @@ class Task:
|
|
|
7612
8067
|
"""The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
|
|
7613
8068
|
configured to use triggered more are supported."""
|
|
7614
8069
|
|
|
8070
|
+
power_bi_task: Optional[PowerBiTask] = None
|
|
8071
|
+
"""The task triggers a Power BI semantic model update when the `power_bi_task` field is present."""
|
|
8072
|
+
|
|
7615
8073
|
python_wheel_task: Optional[PythonWheelTask] = None
|
|
7616
8074
|
"""The task runs a Python wheel when the `python_wheel_task` field is present."""
|
|
7617
8075
|
|
|
@@ -7672,6 +8130,8 @@ class Task:
|
|
|
7672
8130
|
body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task.as_dict()
|
|
7673
8131
|
if self.condition_task:
|
|
7674
8132
|
body["condition_task"] = self.condition_task.as_dict()
|
|
8133
|
+
if self.dashboard_task:
|
|
8134
|
+
body["dashboard_task"] = self.dashboard_task.as_dict()
|
|
7675
8135
|
if self.dbt_task:
|
|
7676
8136
|
body["dbt_task"] = self.dbt_task.as_dict()
|
|
7677
8137
|
if self.depends_on:
|
|
@@ -7708,6 +8168,8 @@ class Task:
|
|
|
7708
8168
|
body["notification_settings"] = self.notification_settings.as_dict()
|
|
7709
8169
|
if self.pipeline_task:
|
|
7710
8170
|
body["pipeline_task"] = self.pipeline_task.as_dict()
|
|
8171
|
+
if self.power_bi_task:
|
|
8172
|
+
body["power_bi_task"] = self.power_bi_task.as_dict()
|
|
7711
8173
|
if self.python_wheel_task:
|
|
7712
8174
|
body["python_wheel_task"] = self.python_wheel_task.as_dict()
|
|
7713
8175
|
if self.retry_on_timeout is not None:
|
|
@@ -7739,6 +8201,8 @@ class Task:
|
|
|
7739
8201
|
body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task
|
|
7740
8202
|
if self.condition_task:
|
|
7741
8203
|
body["condition_task"] = self.condition_task
|
|
8204
|
+
if self.dashboard_task:
|
|
8205
|
+
body["dashboard_task"] = self.dashboard_task
|
|
7742
8206
|
if self.dbt_task:
|
|
7743
8207
|
body["dbt_task"] = self.dbt_task
|
|
7744
8208
|
if self.depends_on:
|
|
@@ -7775,6 +8239,8 @@ class Task:
|
|
|
7775
8239
|
body["notification_settings"] = self.notification_settings
|
|
7776
8240
|
if self.pipeline_task:
|
|
7777
8241
|
body["pipeline_task"] = self.pipeline_task
|
|
8242
|
+
if self.power_bi_task:
|
|
8243
|
+
body["power_bi_task"] = self.power_bi_task
|
|
7778
8244
|
if self.python_wheel_task:
|
|
7779
8245
|
body["python_wheel_task"] = self.python_wheel_task
|
|
7780
8246
|
if self.retry_on_timeout is not None:
|
|
@@ -7805,6 +8271,7 @@ class Task:
|
|
|
7805
8271
|
return cls(
|
|
7806
8272
|
clean_rooms_notebook_task=_from_dict(d, "clean_rooms_notebook_task", CleanRoomsNotebookTask),
|
|
7807
8273
|
condition_task=_from_dict(d, "condition_task", ConditionTask),
|
|
8274
|
+
dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
|
|
7808
8275
|
dbt_task=_from_dict(d, "dbt_task", DbtTask),
|
|
7809
8276
|
depends_on=_repeated_dict(d, "depends_on", TaskDependency),
|
|
7810
8277
|
description=d.get("description", None),
|
|
@@ -7823,6 +8290,7 @@ class Task:
             notebook_task=_from_dict(d, "notebook_task", NotebookTask),
             notification_settings=_from_dict(d, "notification_settings", TaskNotificationSettings),
             pipeline_task=_from_dict(d, "pipeline_task", PipelineTask),
+            power_bi_task=_from_dict(d, "power_bi_task", PowerBiTask),
             python_wheel_task=_from_dict(d, "python_wheel_task", PythonWheelTask),
             retry_on_timeout=d.get("retry_on_timeout", None),
             run_if=_enum(d, "run_if", RunIf),
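The serialization pattern in these hunks is uniform: an optional task type is emitted only when it is set, and from_dict restores it symmetrically. A minimal sketch of that contract, assuming only the Task class shown here (the task_key value is illustrative):

from databricks.sdk.service import jobs

# Unset optional task types never appear in the serialized body.
task = jobs.Task(task_key="example")
body = task.as_dict()
assert "dashboard_task" not in body and "power_bi_task" not in body

# from_dict round-trips whatever as_dict produced.
assert jobs.Task.from_dict(body).task_key == "example"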
@@ -8032,7 +8500,7 @@ class TerminationCodeCode(Enum):
     invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The
     run failed due to a cloud provider issue. Refer to the state message for further details. *
     `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size
-    limit.
+    limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user.

     [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now"""

@@ -8041,6 +8509,7 @@ class TerminationCodeCode(Enum):
     CLOUD_FAILURE = "CLOUD_FAILURE"
     CLUSTER_ERROR = "CLUSTER_ERROR"
     CLUSTER_REQUEST_LIMIT_EXCEEDED = "CLUSTER_REQUEST_LIMIT_EXCEEDED"
+    DISABLED = "DISABLED"
     DRIVER_ERROR = "DRIVER_ERROR"
     FEATURE_DISABLED = "FEATURE_DISABLED"
     INTERNAL_ERROR = "INTERNAL_ERROR"
@@ -8096,7 +8565,7 @@ class TerminationDetails:
     invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The
     run failed due to a cloud provider issue. Refer to the state message for further details. *
     `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size
-    limit.
+    limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user.

     [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now"""

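A hedged sketch of reacting to the new `DISABLED` code when inspecting a finished run. It assumes a WorkspaceClient named `w`, an illustrative run_id, and that the run exposes TerminationDetails at `run.status.termination_details` (as in recent SDK versions):

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.jobs import TerminationCodeCode

w = WorkspaceClient()
run = w.jobs.get_run(run_id=1234)  # run_id is illustrative
details = run.status.termination_details if run.status else None
if details and details.code == TerminationCodeCode.DISABLED:
    print("Run was never executed: the job was explicitly disabled by the user.")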
@@ -8487,6 +8956,30 @@ class WebhookNotifications:
         )


+@dataclass
+class WidgetErrorDetail:
+    message: Optional[str] = None
+
+    def as_dict(self) -> dict:
+        """Serializes the WidgetErrorDetail into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.message is not None:
+            body["message"] = self.message
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the WidgetErrorDetail into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.message is not None:
+            body["message"] = self.message
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> WidgetErrorDetail:
+        """Deserializes the WidgetErrorDetail from a dictionary."""
+        return cls(message=d.get("message", None))
+
+
 class JobsAPI:
     """The Jobs API allows you to create, edit, and delete jobs.

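The new class follows the same as_dict/from_dict contract as every other message in this module; a minimal round-trip sketch using only what is defined above:

from databricks.sdk.service.jobs import WidgetErrorDetail

detail = WidgetErrorDetail.from_dict({"message": "widget failed to render"})
assert detail.message == "widget failed to render"
assert detail.as_dict() == {"message": "widget failed to render"}
assert WidgetErrorDetail.from_dict({}).message is None  # message is optional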
@@ -8672,7 +9165,6 @@ class JobsAPI:
         :param job_clusters: List[:class:`JobCluster`] (optional)
           A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries
           cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
-          If more than 100 job clusters are available, you can paginate through them using :method:jobs/get.
         :param max_concurrent_runs: int (optional)
           An optional maximum allowed number of concurrent runs of the job. Set this value if you want to be
           able to execute multiple runs of the same job concurrently. This is useful for example if you
@@ -8690,8 +9182,12 @@ class JobsAPI:
         :param parameters: List[:class:`JobParameterDefinition`] (optional)
           Job-level parameter definitions
         :param performance_target: :class:`PerformanceTarget` (optional)
-
-
+          The performance mode on a serverless job. This field determines the level of compute performance or
+          cost-efficiency for the run.
+
+          * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`:
+          Prioritizes fast startup and execution times through rapid scaling and optimized cluster
+          performance.
         :param queue: :class:`QueueSettings` (optional)
           The queue settings of the job.
         :param run_as: :class:`JobRunAs` (optional)
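A hedged sketch of opting a new serverless job into the performance-optimized mode at creation time; the workspace client, job name, and notebook path are all illustrative:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import jobs

w = WorkspaceClient()
created = w.jobs.create(
    name="nightly-refresh",  # illustrative name
    performance_target=jobs.PerformanceTarget.PERFORMANCE_OPTIMIZED,
    tasks=[
        jobs.Task(
            task_key="main",
            notebook_task=jobs.NotebookTask(notebook_path="/Workspace/etl/nightly"),
        )
    ],
)
print(created.job_id)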
@@ -8707,9 +9203,11 @@ class JobsAPI:
           clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added
           to the job.
         :param tasks: List[:class:`Task`] (optional)
-          A list of task specifications to be executed by this job.
-
-
+          A list of task specifications to be executed by this job. It supports up to 1000 elements in write
+          endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read
+          endpoints return only 100 tasks. If more than 100 tasks are available, you can paginate through them
+          using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more
+          results are available.
         :param timeout_seconds: int (optional)
           An optional timeout applied to each run of this job. A value of `0` means no timeout.
         :param trigger: :class:`TriggerSettings` (optional)
@@ -8848,16 +9346,18 @@ class JobsAPI:

         Retrieves the details for a single job.

-
-
-
-
+        Large arrays in the results will be paginated when they exceed 100 elements. A request for a single
+        job will return all properties for that job, and the first 100 elements of array properties (`tasks`,
+        `job_clusters`, `environments` and `parameters`). Use the `next_page_token` field to check for more
+        results and pass its value as the `page_token` in subsequent requests. If any array properties have
+        more than 100 elements, additional results will be returned on subsequent requests. Arrays without
+        additional results will be empty on later pages.

         :param job_id: int
           The canonical identifier of the job to retrieve information about. This field is required.
         :param page_token: str (optional)
-          Use `next_page_token` returned from the previous GetJob to request the next page of the
-
+          Use `next_page_token` returned from the previous GetJob response to request the next page of the
+          job's array properties.

         :returns: :class:`Job`
         """
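A sketch of draining the paginated arrays with repeated get calls, assuming a WorkspaceClient `w` and an illustrative job_id; per the docstring, `next_page_token` sits at the object root, and the task array lives under the job's settings:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
job = w.jobs.get(job_id=1234)  # illustrative job_id
all_tasks = list(job.settings.tasks or [])
while job.next_page_token:
    job = w.jobs.get(job_id=1234, page_token=job.next_page_token)
    all_tasks.extend(job.settings.tasks or [])  # later pages carry only the remainder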
@@ -8922,10 +9422,12 @@ class JobsAPI:

         Retrieves the metadata of a run.

-
-
-
-
+        Large arrays in the results will be paginated when they exceed 100 elements. A request for a single
+        run will return all properties for that run, and the first 100 elements of array properties (`tasks`,
+        `job_clusters`, `job_parameters` and `repair_history`). Use the next_page_token field to check for
+        more results and pass its value as the page_token in subsequent requests. If any array properties have
+        more than 100 elements, additional results will be returned on subsequent requests. Arrays without
+        additional results will be empty on later pages.

         :param run_id: int
           The canonical identifier of the run for which to retrieve the metadata. This field is required.
@@ -8934,8 +9436,8 @@ class JobsAPI:
         :param include_resolved_values: bool (optional)
           Whether to include resolved parameter values in the response.
         :param page_token: str (optional)
-          Use `next_page_token` returned from the previous GetRun to request the next page of the
-
+          Use `next_page_token` returned from the previous GetRun response to request the next page of the
+          run's array properties.

         :returns: :class:`Run`
         """
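The same pattern, sketched for run arrays (reusing `w` from the sketch above; run_id is illustrative). Here `tasks` and `next_page_token` live directly on the Run object:

run = w.jobs.get_run(run_id=5678)  # illustrative run_id
run_tasks = list(run.tasks or [])
while run.next_page_token:
    run = w.jobs.get_run(run_id=5678, page_token=run.next_page_token)
    run_tasks.extend(run.tasks or [])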
@@ -8998,8 +9500,8 @@ class JobsAPI:
         Retrieves a list of jobs.

         :param expand_tasks: bool (optional)
-          Whether to include task and cluster details in the response. Note that
-
+          Whether to include task and cluster details in the response. Note that only the first 100 elements
+          will be shown. Use :method:jobs/get to paginate through all tasks and clusters.
         :param limit: int (optional)
           The number of jobs to return. This value must be greater than 0 and less or equal to 100. The
           default value is 20.
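A short sketch of the interplay between expand_tasks and the 100-element cap, assuming the has_more flag that BaseJob carries (reusing `w` from the earlier sketches):

for summary in w.jobs.list(expand_tasks=True):
    if summary.has_more:
        # The listing surfaced only the first 100 tasks/clusters for this
        # job; page through the rest with jobs.get as shown earlier.
        full = w.jobs.get(job_id=summary.job_id)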
@@ -9065,8 +9567,8 @@ class JobsAPI:
           If completed_only is `true`, only completed runs are included in the results; otherwise, lists both
           active and completed runs. This field cannot be `true` when active_only is `true`.
         :param expand_tasks: bool (optional)
-          Whether to include task and cluster details in the response. Note that
-
+          Whether to include task and cluster details in the response. Note that only the first 100 elements
+          will be shown. Use :method:jobs/getrun to paginate through all tasks and clusters.
         :param job_id: int (optional)
           The job for which to list runs. If omitted, the Jobs service lists runs from all jobs.
         :param limit: int (optional)
@@ -9133,6 +9635,7 @@ class JobsAPI:
         job_parameters: Optional[Dict[str, str]] = None,
         latest_repair_id: Optional[int] = None,
         notebook_params: Optional[Dict[str, str]] = None,
+        performance_target: Optional[PerformanceTarget] = None,
         pipeline_params: Optional[PipelineParams] = None,
         python_named_params: Optional[Dict[str, str]] = None,
         python_params: Optional[List[str]] = None,
@@ -9183,6 +9686,14 @@ class JobsAPI:

         [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
         [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html
+        :param performance_target: :class:`PerformanceTarget` (optional)
+          The performance mode on a serverless job. The performance target determines the level of compute
+          performance or cost-efficiency for the run. This field overrides the performance target defined on
+          the job level.
+
+          * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`:
+          Prioritizes fast startup and execution times through rapid scaling and optimized cluster
+          performance.
         :param pipeline_params: :class:`PipelineParams` (optional)
           Controls whether the pipeline should perform a full refresh
         :param python_named_params: Dict[str,str] (optional)
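A hedged sketch of overriding the job-level target when repairing a failed run (run_id and the task key are illustrative; repair_run returns a waiter, so .result() blocks until the repair finishes):

from databricks.sdk.service.jobs import PerformanceTarget

repaired = w.jobs.repair_run(
    run_id=5678,                 # illustrative run_id
    rerun_tasks=["main"],        # illustrative task key
    performance_target=PerformanceTarget.STANDARD,
).result()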
@@ -9243,6 +9754,8 @@ class JobsAPI:
             body["latest_repair_id"] = latest_repair_id
         if notebook_params is not None:
             body["notebook_params"] = notebook_params
+        if performance_target is not None:
+            body["performance_target"] = performance_target.value
         if pipeline_params is not None:
             body["pipeline_params"] = pipeline_params.as_dict()
         if python_named_params is not None:
@@ -9282,6 +9795,7 @@ class JobsAPI:
         job_parameters: Optional[Dict[str, str]] = None,
         latest_repair_id: Optional[int] = None,
         notebook_params: Optional[Dict[str, str]] = None,
+        performance_target: Optional[PerformanceTarget] = None,
        pipeline_params: Optional[PipelineParams] = None,
         python_named_params: Optional[Dict[str, str]] = None,
         python_params: Optional[List[str]] = None,
@@ -9298,6 +9812,7 @@ class JobsAPI:
             job_parameters=job_parameters,
             latest_repair_id=latest_repair_id,
             notebook_params=notebook_params,
+            performance_target=performance_target,
             pipeline_params=pipeline_params,
             python_named_params=python_named_params,
             python_params=python_params,
@@ -9408,9 +9923,13 @@ class JobsAPI:
           A list of task keys to run inside of the job. If this field is not provided, all tasks in the job
           will be run.
         :param performance_target: :class:`PerformanceTarget` (optional)
-
-
-          job
+          The performance mode on a serverless job. The performance target determines the level of compute
+          performance or cost-efficiency for the run. This field overrides the performance target defined on
+          the job level.
+
+          * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`:
+          Prioritizes fast startup and execution times through rapid scaling and optimized cluster
+          performance.
         :param pipeline_params: :class:`PipelineParams` (optional)
           Controls whether the pipeline should perform a full refresh
         :param python_named_params: Dict[str,str] (optional)
|