databricks-sdk 0.49.0__py3-none-any.whl → 0.50.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -21,6 +21,12 @@ from databricks.sdk.service import compute
 # all definitions in this file are in alphabetical order
 
 
+class AuthenticationMethod(Enum):
+
+    OAUTH = "OAUTH"
+    PAT = "PAT"
+
+
 @dataclass
 class BaseJob:
     created_time: Optional[int] = None
@@ -37,9 +43,9 @@ class BaseJob:
     on accessible budget policies of the run_as identity on job creation or modification."""
 
     has_more: Optional[bool] = None
-    """Indicates if the job has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
-    can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2 :method:jobs/list
-    requests with `expand_tasks=true`."""
+    """Indicates if the job has more array properties (`tasks`, `job_clusters`) that are not shown.
+    They can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2
+    :method:jobs/list requests with `expand_tasks=true`."""
 
     job_id: Optional[int] = None
     """The canonical identifier for this job."""
@@ -125,9 +131,13 @@ class BaseRun:
     """Description of the run"""
 
     effective_performance_target: Optional[PerformanceTarget] = None
-    """effective_performance_target is the actual performance target used by the run during execution.
-    effective_performance_target can differ from the client-set performance_target depending on if
-    the job was eligible to be cost-optimized."""
+    """The actual performance target used by the serverless run during execution. This can differ from
+    the client-set performance target on the request depending on whether the performance mode is
+    supported by the job type.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     end_time: Optional[int] = None
     """The time at which this run ended in epoch milliseconds (milliseconds since 1/1/1970 UTC). This
@@ -151,8 +161,8 @@ class BaseRun:
     are used, `git_source` must be defined on the job."""
 
     has_more: Optional[bool] = None
-    """Indicates if the run has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
-    can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
+    """Indicates if the run has more array properties (`tasks`, `job_clusters`) that are not shown.
+    They can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
     :method:jobs/listruns requests with `expand_tasks=true`."""
 
     job_clusters: Optional[List[JobCluster]] = None
@@ -793,8 +803,6 @@ class ClusterSpec:
 
 @dataclass
 class ComputeConfig:
-    """Next field: 4"""
-
     num_gpus: int
     """Number of GPUs."""
 
@@ -992,8 +1000,7 @@ class CreateJob:
     job_clusters: Optional[List[JobCluster]] = None
     """A list of job cluster specifications that can be shared and reused by tasks of this job.
     Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
-    task settings. If more than 100 job clusters are available, you can paginate through them using
-    :method:jobs/get."""
+    task settings."""
 
     max_concurrent_runs: Optional[int] = None
     """An optional maximum allowed number of concurrent runs of the job. Set this value if you want to
@@ -1016,8 +1023,12 @@ class CreateJob:
     """Job-level parameter definitions"""
 
     performance_target: Optional[PerformanceTarget] = None
-    """PerformanceTarget defines how performant or cost efficient the execution of run on serverless
-    should be."""
+    """The performance mode on a serverless job. The performance target determines the level of compute
+    performance or cost-efficiency for the run.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     queue: Optional[QueueSettings] = None
     """The queue settings of the job."""
@@ -1038,9 +1049,11 @@ class CreateJob:
     be added to the job."""
 
     tasks: Optional[List[Task]] = None
-    """A list of task specifications to be executed by this job. If more than 100 tasks are available,
-    you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the
-    object root to determine if more results are available."""
+    """A list of task specifications to be executed by this job. It supports up to 1000 elements in
+    write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update,
+    :method:jobs/submit). Read endpoints return only 100 tasks. If more than 100 tasks are
+    available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field
+    at the object root to determine if more results are available."""
 
     timeout_seconds: Optional[int] = None
     """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
@@ -1271,6 +1284,107 @@ class CronSchedule:
         )
 
 
+@dataclass
+class DashboardPageSnapshot:
+    page_display_name: Optional[str] = None
+
+    widget_error_details: Optional[List[WidgetErrorDetail]] = None
+
+    def as_dict(self) -> dict:
+        """Serializes the DashboardPageSnapshot into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.page_display_name is not None:
+            body["page_display_name"] = self.page_display_name
+        if self.widget_error_details:
+            body["widget_error_details"] = [v.as_dict() for v in self.widget_error_details]
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DashboardPageSnapshot into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.page_display_name is not None:
+            body["page_display_name"] = self.page_display_name
+        if self.widget_error_details:
+            body["widget_error_details"] = self.widget_error_details
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DashboardPageSnapshot:
+        """Deserializes the DashboardPageSnapshot from a dictionary."""
+        return cls(
+            page_display_name=d.get("page_display_name", None),
+            widget_error_details=_repeated_dict(d, "widget_error_details", WidgetErrorDetail),
+        )
+
+
+@dataclass
+class DashboardTask:
+    """Configures the Lakeview Dashboard job task type."""
+
+    dashboard_id: Optional[str] = None
+
+    subscription: Optional[Subscription] = None
+
+    warehouse_id: Optional[str] = None
+    """The warehouse id to execute the dashboard with for the schedule"""
+
+    def as_dict(self) -> dict:
+        """Serializes the DashboardTask into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.dashboard_id is not None:
+            body["dashboard_id"] = self.dashboard_id
+        if self.subscription:
+            body["subscription"] = self.subscription.as_dict()
+        if self.warehouse_id is not None:
+            body["warehouse_id"] = self.warehouse_id
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DashboardTask into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.dashboard_id is not None:
+            body["dashboard_id"] = self.dashboard_id
+        if self.subscription:
+            body["subscription"] = self.subscription
+        if self.warehouse_id is not None:
+            body["warehouse_id"] = self.warehouse_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DashboardTask:
+        """Deserializes the DashboardTask from a dictionary."""
+        return cls(
+            dashboard_id=d.get("dashboard_id", None),
+            subscription=_from_dict(d, "subscription", Subscription),
+            warehouse_id=d.get("warehouse_id", None),
+        )
+
+
+@dataclass
+class DashboardTaskOutput:
+    page_snapshots: Optional[List[DashboardPageSnapshot]] = None
+    """Should only be populated for manual PDF download jobs."""
+
+    def as_dict(self) -> dict:
+        """Serializes the DashboardTaskOutput into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.page_snapshots:
+            body["page_snapshots"] = [v.as_dict() for v in self.page_snapshots]
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DashboardTaskOutput into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.page_snapshots:
+            body["page_snapshots"] = self.page_snapshots
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DashboardTaskOutput:
+        """Deserializes the DashboardTaskOutput from a dictionary."""
+        return cls(page_snapshots=_repeated_dict(d, "page_snapshots", DashboardPageSnapshot))
+
+
 @dataclass
 class DbtOutput:
     artifacts_headers: Optional[Dict[str, str]] = None
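A minimal sketch of the new dashboard task type in use, assuming the surrounding Task dataclass from this module; the IDs are placeholders:

from databricks.sdk.service.jobs import DashboardTask, Task

refresh = Task(
    task_key="refresh_dashboard",
    dashboard_task=DashboardTask(
        dashboard_id="my-dashboard-id",  # placeholder
        warehouse_id="my-warehouse-id",  # placeholder
    ),
)

# The generated serializers round-trip cleanly, as the SDK does for request bodies.
assert DashboardTask.from_dict(refresh.dashboard_task.as_dict()) == refresh.dashboard_task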
@@ -1881,8 +1995,6 @@ class Format(Enum):
 
 @dataclass
 class GenAiComputeTask:
-    """Next field: 9"""
-
     dl_runtime_image: str
     """Runtime image"""
 
@@ -1890,7 +2002,6 @@ class GenAiComputeTask:
     """Command launcher to run the actual script, e.g. bash, python etc."""
 
     compute: Optional[ComputeConfig] = None
-    """Next field: 4"""
 
     mlflow_experiment_name: Optional[str] = None
     """Optional string containing the name of the MLflow experiment to log the run to. If name is not
@@ -2183,15 +2294,15 @@ class Job:
     on accessible budget policies of the run_as identity on job creation or modification."""
 
     has_more: Optional[bool] = None
-    """Indicates if the job has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
-    can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2 :method:jobs/list
-    requests with `expand_tasks=true`."""
+    """Indicates if the job has more array properties (`tasks`, `job_clusters`) that are not shown.
+    They can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2
+    :method:jobs/list requests with `expand_tasks=true`."""
 
     job_id: Optional[int] = None
     """The canonical identifier for this job."""
 
     next_page_token: Optional[str] = None
-    """A token that can be used to list the next page of sub-resources."""
+    """A token that can be used to list the next page of array properties."""
 
     run_as_user_name: Optional[str] = None
     """The email of an active workspace user or the application ID of a service principal that the job
@@ -2977,8 +3088,7 @@ class JobSettings:
     job_clusters: Optional[List[JobCluster]] = None
     """A list of job cluster specifications that can be shared and reused by tasks of this job.
     Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
-    task settings. If more than 100 job clusters are available, you can paginate through them using
-    :method:jobs/get."""
+    task settings."""
 
     max_concurrent_runs: Optional[int] = None
     """An optional maximum allowed number of concurrent runs of the job. Set this value if you want to
@@ -3001,8 +3111,12 @@ class JobSettings:
     """Job-level parameter definitions"""
 
     performance_target: Optional[PerformanceTarget] = None
-    """PerformanceTarget defines how performant or cost efficient the execution of run on serverless
-    should be."""
+    """The performance mode on a serverless job. The performance target determines the level of compute
+    performance or cost-efficiency for the run.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     queue: Optional[QueueSettings] = None
     """The queue settings of the job."""
@@ -3023,9 +3137,11 @@ class JobSettings:
     be added to the job."""
 
     tasks: Optional[List[Task]] = None
-    """A list of task specifications to be executed by this job. If more than 100 tasks are available,
-    you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the
-    object root to determine if more results are available."""
+    """A list of task specifications to be executed by this job. It supports up to 1000 elements in
+    write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update,
+    :method:jobs/submit). Read endpoints return only 100 tasks. If more than 100 tasks are
+    available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field
+    at the object root to determine if more results are available."""
 
     timeout_seconds: Optional[int] = None
     """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
@@ -3659,9 +3775,8 @@ class PerformanceTarget(Enum):
     on serverless compute should be. The performance mode on the job or pipeline should map to a
     performance setting that is passed to Cluster Manager (see cluster-common PerformanceTarget)."""
 
-    BALANCED = "BALANCED"
-    COST_OPTIMIZED = "COST_OPTIMIZED"
     PERFORMANCE_OPTIMIZED = "PERFORMANCE_OPTIMIZED"
+    STANDARD = "STANDARD"
 
 
 @dataclass
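Note this is a breaking enum change: `BALANCED` and `COST_OPTIMIZED` are removed and `STANDARD` is new, so 0.49.0 code that pins the removed members will now raise. A defensive mapping, on the assumption that `STANDARD` is the closest replacement for the removed cost-oriented members:

from databricks.sdk.service.jobs import PerformanceTarget

def coerce_target(raw: str) -> PerformanceTarget:
    # Values no longer in the enum (e.g. "BALANCED", "COST_OPTIMIZED") fall back to STANDARD.
    try:
        return PerformanceTarget(raw)
    except ValueError:
        return PerformanceTarget.STANDARD

assert coerce_target("COST_OPTIMIZED") is PerformanceTarget.STANDARD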
@@ -3760,6 +3875,175 @@ class PipelineTask:
         return cls(full_refresh=d.get("full_refresh", None), pipeline_id=d.get("pipeline_id", None))
 
 
+@dataclass
+class PowerBiModel:
+    authentication_method: Optional[AuthenticationMethod] = None
+    """How the published Power BI model authenticates to Databricks"""
+
+    model_name: Optional[str] = None
+    """The name of the Power BI model"""
+
+    overwrite_existing: Optional[bool] = None
+    """Whether to overwrite existing Power BI models"""
+
+    storage_mode: Optional[StorageMode] = None
+    """The default storage mode of the Power BI model"""
+
+    workspace_name: Optional[str] = None
+    """The name of the Power BI workspace of the model"""
+
+    def as_dict(self) -> dict:
+        """Serializes the PowerBiModel into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.authentication_method is not None:
+            body["authentication_method"] = self.authentication_method.value
+        if self.model_name is not None:
+            body["model_name"] = self.model_name
+        if self.overwrite_existing is not None:
+            body["overwrite_existing"] = self.overwrite_existing
+        if self.storage_mode is not None:
+            body["storage_mode"] = self.storage_mode.value
+        if self.workspace_name is not None:
+            body["workspace_name"] = self.workspace_name
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the PowerBiModel into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.authentication_method is not None:
+            body["authentication_method"] = self.authentication_method
+        if self.model_name is not None:
+            body["model_name"] = self.model_name
+        if self.overwrite_existing is not None:
+            body["overwrite_existing"] = self.overwrite_existing
+        if self.storage_mode is not None:
+            body["storage_mode"] = self.storage_mode
+        if self.workspace_name is not None:
+            body["workspace_name"] = self.workspace_name
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> PowerBiModel:
+        """Deserializes the PowerBiModel from a dictionary."""
+        return cls(
+            authentication_method=_enum(d, "authentication_method", AuthenticationMethod),
+            model_name=d.get("model_name", None),
+            overwrite_existing=d.get("overwrite_existing", None),
+            storage_mode=_enum(d, "storage_mode", StorageMode),
+            workspace_name=d.get("workspace_name", None),
+        )
+
+
+@dataclass
+class PowerBiTable:
+    catalog: Optional[str] = None
+    """The catalog name in Databricks"""
+
+    name: Optional[str] = None
+    """The table name in Databricks"""
+
+    schema: Optional[str] = None
+    """The schema name in Databricks"""
+
+    storage_mode: Optional[StorageMode] = None
+    """The Power BI storage mode of the table"""
+
+    def as_dict(self) -> dict:
+        """Serializes the PowerBiTable into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.catalog is not None:
+            body["catalog"] = self.catalog
+        if self.name is not None:
+            body["name"] = self.name
+        if self.schema is not None:
+            body["schema"] = self.schema
+        if self.storage_mode is not None:
+            body["storage_mode"] = self.storage_mode.value
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the PowerBiTable into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.catalog is not None:
+            body["catalog"] = self.catalog
+        if self.name is not None:
+            body["name"] = self.name
+        if self.schema is not None:
+            body["schema"] = self.schema
+        if self.storage_mode is not None:
+            body["storage_mode"] = self.storage_mode
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> PowerBiTable:
+        """Deserializes the PowerBiTable from a dictionary."""
+        return cls(
+            catalog=d.get("catalog", None),
+            name=d.get("name", None),
+            schema=d.get("schema", None),
+            storage_mode=_enum(d, "storage_mode", StorageMode),
+        )
+
+
+@dataclass
+class PowerBiTask:
+    connection_resource_name: Optional[str] = None
+    """The resource name of the UC connection to authenticate from Databricks to Power BI"""
+
+    power_bi_model: Optional[PowerBiModel] = None
+    """The semantic model to update"""
+
+    refresh_after_update: Optional[bool] = None
+    """Whether the model should be refreshed after the update"""
+
+    tables: Optional[List[PowerBiTable]] = None
+    """The tables to be exported to Power BI"""
+
+    warehouse_id: Optional[str] = None
+    """The SQL warehouse ID to use as the Power BI data source"""
+
+    def as_dict(self) -> dict:
+        """Serializes the PowerBiTask into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.connection_resource_name is not None:
+            body["connection_resource_name"] = self.connection_resource_name
+        if self.power_bi_model:
+            body["power_bi_model"] = self.power_bi_model.as_dict()
+        if self.refresh_after_update is not None:
+            body["refresh_after_update"] = self.refresh_after_update
+        if self.tables:
+            body["tables"] = [v.as_dict() for v in self.tables]
+        if self.warehouse_id is not None:
+            body["warehouse_id"] = self.warehouse_id
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the PowerBiTask into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.connection_resource_name is not None:
+            body["connection_resource_name"] = self.connection_resource_name
+        if self.power_bi_model:
+            body["power_bi_model"] = self.power_bi_model
+        if self.refresh_after_update is not None:
+            body["refresh_after_update"] = self.refresh_after_update
+        if self.tables:
+            body["tables"] = self.tables
+        if self.warehouse_id is not None:
+            body["warehouse_id"] = self.warehouse_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> PowerBiTask:
+        """Deserializes the PowerBiTask from a dictionary."""
+        return cls(
+            connection_resource_name=d.get("connection_resource_name", None),
+            power_bi_model=_from_dict(d, "power_bi_model", PowerBiModel),
+            refresh_after_update=d.get("refresh_after_update", None),
+            tables=_repeated_dict(d, "tables", PowerBiTable),
+            warehouse_id=d.get("warehouse_id", None),
+        )
+
+
 @dataclass
 class PythonWheelTask:
     package_name: str
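Putting the three new Power BI types together, a hedged sketch of a task body; every name below is an illustrative placeholder:

from databricks.sdk.service.jobs import (
    AuthenticationMethod,
    PowerBiModel,
    PowerBiTable,
    PowerBiTask,
    StorageMode,
)

power_bi = PowerBiTask(
    connection_resource_name="my-powerbi-connection",  # placeholder UC connection
    warehouse_id="my-warehouse-id",  # placeholder
    refresh_after_update=True,
    power_bi_model=PowerBiModel(
        workspace_name="Sales",
        model_name="sales_model",
        authentication_method=AuthenticationMethod.OAUTH,
        storage_mode=StorageMode.DIRECT_QUERY,
        overwrite_existing=True,
    ),
    tables=[
        PowerBiTable(catalog="main", schema="sales", name="orders", storage_mode=StorageMode.DIRECT_QUERY),
    ],
)

# Enums serialize to their string values in the JSON body.
print(power_bi.as_dict()["power_bi_model"]["storage_mode"])  # DIRECT_QUERY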
@@ -4542,9 +4826,13 @@ class Run:
     """Description of the run"""
 
     effective_performance_target: Optional[PerformanceTarget] = None
-    """effective_performance_target is the actual performance target used by the run during execution.
-    effective_performance_target can differ from the client-set performance_target depending on if
-    the job was eligible to be cost-optimized."""
+    """The actual performance target used by the serverless run during execution. This can differ from
+    the client-set performance target on the request depending on whether the performance mode is
+    supported by the job type.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     end_time: Optional[int] = None
     """The time at which this run ended in epoch milliseconds (milliseconds since 1/1/1970 UTC). This
@@ -4568,8 +4856,8 @@ class Run:
     are used, `git_source` must be defined on the job."""
 
     has_more: Optional[bool] = None
-    """Indicates if the run has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
-    can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
+    """Indicates if the run has more array properties (`tasks`, `job_clusters`) that are not shown.
+    They can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
     :method:jobs/listruns requests with `expand_tasks=true`."""
 
     iterations: Optional[List[RunTask]] = None
@@ -4593,7 +4881,7 @@ class Run:
     that the task run belongs to."""
 
     next_page_token: Optional[str] = None
-    """A token that can be used to list the next page of sub-resources."""
+    """A token that can be used to list the next page of array properties."""
 
     number_in_job: Optional[int] = None
     """A unique identifier for this job run. This is set to the same value as `run_id`."""
@@ -5266,9 +5554,13 @@ class RunNow:
     job will be run."""
 
     performance_target: Optional[PerformanceTarget] = None
-    """PerformanceTarget defines how performant or cost efficient the execution of run on serverless
-    compute should be. For RunNow, this performance target will override the target defined on the
-    job-level."""
+    """The performance mode on a serverless job. The performance target determines the level of compute
+    performance or cost-efficiency for the run. This field overrides the performance target defined
+    on the job level.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     pipeline_params: Optional[PipelineParams] = None
     """Controls whether the pipeline should perform a full refresh"""
@@ -5443,6 +5735,9 @@ class RunOutput:
     clean_rooms_notebook_output: Optional[CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput] = None
     """The output of a clean rooms notebook task, if available"""
 
+    dashboard_output: Optional[DashboardTaskOutput] = None
+    """The output of a dashboard task, if available"""
+
     dbt_output: Optional[DbtOutput] = None
     """The output of a dbt task, if available."""
 
@@ -5489,6 +5784,8 @@ class RunOutput:
         body = {}
         if self.clean_rooms_notebook_output:
             body["clean_rooms_notebook_output"] = self.clean_rooms_notebook_output.as_dict()
+        if self.dashboard_output:
+            body["dashboard_output"] = self.dashboard_output.as_dict()
         if self.dbt_output:
             body["dbt_output"] = self.dbt_output.as_dict()
         if self.error is not None:
@@ -5516,6 +5813,8 @@ class RunOutput:
         body = {}
         if self.clean_rooms_notebook_output:
             body["clean_rooms_notebook_output"] = self.clean_rooms_notebook_output
+        if self.dashboard_output:
+            body["dashboard_output"] = self.dashboard_output
         if self.dbt_output:
             body["dbt_output"] = self.dbt_output
         if self.error is not None:
@@ -5545,6 +5844,7 @@ class RunOutput:
             clean_rooms_notebook_output=_from_dict(
                 d, "clean_rooms_notebook_output", CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput
             ),
+            dashboard_output=_from_dict(d, "dashboard_output", DashboardTaskOutput),
             dbt_output=_from_dict(d, "dbt_output", DbtOutput),
            error=d.get("error", None),
            error_trace=d.get("error_trace", None),
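Downstream, the new field surfaces on task run output. A hedged sketch of reading it back; the run ID is a placeholder, and note the snapshots are documented above as populated only for manual PDF download jobs:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

output = w.jobs.get_run_output(run_id=456)  # placeholder task run ID
if output.dashboard_output and output.dashboard_output.page_snapshots:
    for snapshot in output.dashboard_output.page_snapshots:
        # widget_error_details carries per-widget failures, if any
        print(snapshot.page_display_name, snapshot.widget_error_details)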
@@ -5860,6 +6160,9 @@ class RunTask:
     `condition_task` field is present. The condition task does not require a cluster to execute and
     does not support retries or notifications."""
 
+    dashboard_task: Optional[DashboardTask] = None
+    """The task runs a DashboardTask when the `dashboard_task` field is present."""
+
     dbt_task: Optional[DbtTask] = None
     """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
     requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
@@ -5873,13 +6176,16 @@ class RunTask:
     """An optional description for this task."""
 
     disabled: Optional[bool] = None
-    """Denotes whether or not the task was disabled by the user. Disabled tasks do not execute and are
-    immediately skipped as soon as they are unblocked."""
+    """Deprecated, field was never used in production."""
 
     effective_performance_target: Optional[PerformanceTarget] = None
-    """effective_performance_target is the actual performance target used by the run during execution.
-    effective_performance_target can differ from the client-set performance_target depending on if
-    the job was eligible to be cost-optimized."""
+    """The actual performance target used by the serverless run during execution. This can differ from
+    the client-set performance target on the request depending on whether the performance mode is
+    supported by the job type.
+
+    * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+    `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+    optimized cluster performance."""
 
     email_notifications: Optional[JobEmailNotifications] = None
     """An optional set of email addresses notified when the task run begins or completes. The default
@@ -5910,7 +6216,6 @@ class RunTask:
     present."""
 
     gen_ai_compute_task: Optional[GenAiComputeTask] = None
-    """Next field: 9"""
 
     git_source: Optional[GitSource] = None
     """An optional specification for a remote Git repository containing the source code used by tasks.
@@ -5942,6 +6247,9 @@ class RunTask:
     """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
     configured to use triggered more are supported."""
 
+    power_bi_task: Optional[PowerBiTask] = None
+    """The task triggers a Power BI semantic model update when the `power_bi_task` field is present."""
+
     python_wheel_task: Optional[PythonWheelTask] = None
     """The task runs a Python wheel when the `python_wheel_task` field is present."""
 
@@ -6033,6 +6341,8 @@ class RunTask:
             body["cluster_instance"] = self.cluster_instance.as_dict()
         if self.condition_task:
             body["condition_task"] = self.condition_task.as_dict()
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task.as_dict()
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task.as_dict()
         if self.depends_on:
@@ -6071,6 +6381,8 @@ class RunTask:
             body["notification_settings"] = self.notification_settings.as_dict()
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task.as_dict()
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task.as_dict()
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task.as_dict()
         if self.queue_duration is not None:
@@ -6124,6 +6436,8 @@ class RunTask:
             body["cluster_instance"] = self.cluster_instance
         if self.condition_task:
             body["condition_task"] = self.condition_task
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task
         if self.depends_on:
@@ -6162,6 +6476,8 @@ class RunTask:
             body["notification_settings"] = self.notification_settings
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task
         if self.queue_duration is not None:
@@ -6211,6 +6527,7 @@ class RunTask:
             cleanup_duration=d.get("cleanup_duration", None),
             cluster_instance=_from_dict(d, "cluster_instance", ClusterInstance),
             condition_task=_from_dict(d, "condition_task", RunConditionTask),
+            dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
             dbt_task=_from_dict(d, "dbt_task", DbtTask),
             depends_on=_repeated_dict(d, "depends_on", TaskDependency),
             description=d.get("description", None),
@@ -6230,6 +6547,7 @@ class RunTask:
             notebook_task=_from_dict(d, "notebook_task", NotebookTask),
             notification_settings=_from_dict(d, "notification_settings", TaskNotificationSettings),
             pipeline_task=_from_dict(d, "pipeline_task", PipelineTask),
+            power_bi_task=_from_dict(d, "power_bi_task", PowerBiTask),
             python_wheel_task=_from_dict(d, "python_wheel_task", PythonWheelTask),
             queue_duration=d.get("queue_duration", None),
             resolved_values=_from_dict(d, "resolved_values", ResolvedValues),
@@ -7028,6 +7346,13 @@ class SqlTaskSubscription:
         return cls(destination_id=d.get("destination_id", None), user_name=d.get("user_name", None))
 
 
+class StorageMode(Enum):
+
+    DIRECT_QUERY = "DIRECT_QUERY"
+    DUAL = "DUAL"
+    IMPORT = "IMPORT"
+
+
 @dataclass
 class SubmitRun:
     access_control_list: Optional[List[JobAccessControlRequest]] = None
@@ -7223,6 +7548,9 @@ class SubmitTask:
     `condition_task` field is present. The condition task does not require a cluster to execute and
     does not support retries or notifications."""
 
+    dashboard_task: Optional[DashboardTask] = None
+    """The task runs a DashboardTask when the `dashboard_task` field is present."""
+
     dbt_task: Optional[DbtTask] = None
     """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
     requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
@@ -7253,7 +7581,6 @@ class SubmitTask:
     present."""
 
     gen_ai_compute_task: Optional[GenAiComputeTask] = None
-    """Next field: 9"""
 
     health: Optional[JobsHealthRules] = None
     """An optional set of health rules that can be defined for this job."""
@@ -7276,6 +7603,9 @@ class SubmitTask:
     """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
     configured to use triggered more are supported."""
 
+    power_bi_task: Optional[PowerBiTask] = None
+    """The task triggers a Power BI semantic model update when the `power_bi_task` field is present."""
+
     python_wheel_task: Optional[PythonWheelTask] = None
     """The task runs a Python wheel when the `python_wheel_task` field is present."""
 
@@ -7329,6 +7659,8 @@ class SubmitTask:
             body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task.as_dict()
         if self.condition_task:
             body["condition_task"] = self.condition_task.as_dict()
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task.as_dict()
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task.as_dict()
         if self.depends_on:
@@ -7357,6 +7689,8 @@ class SubmitTask:
             body["notification_settings"] = self.notification_settings.as_dict()
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task.as_dict()
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task.as_dict()
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task.as_dict()
         if self.run_if is not None:
@@ -7386,6 +7720,8 @@ class SubmitTask:
             body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task
         if self.condition_task:
             body["condition_task"] = self.condition_task
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task
         if self.depends_on:
@@ -7414,6 +7750,8 @@ class SubmitTask:
             body["notification_settings"] = self.notification_settings
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task
         if self.run_if is not None:
@@ -7442,6 +7780,7 @@ class SubmitTask:
         return cls(
             clean_rooms_notebook_task=_from_dict(d, "clean_rooms_notebook_task", CleanRoomsNotebookTask),
             condition_task=_from_dict(d, "condition_task", ConditionTask),
+            dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
             dbt_task=_from_dict(d, "dbt_task", DbtTask),
             depends_on=_repeated_dict(d, "depends_on", TaskDependency),
             description=d.get("description", None),
@@ -7456,6 +7795,7 @@ class SubmitTask:
             notebook_task=_from_dict(d, "notebook_task", NotebookTask),
             notification_settings=_from_dict(d, "notification_settings", TaskNotificationSettings),
             pipeline_task=_from_dict(d, "pipeline_task", PipelineTask),
+            power_bi_task=_from_dict(d, "power_bi_task", PowerBiTask),
             python_wheel_task=_from_dict(d, "python_wheel_task", PythonWheelTask),
             run_if=_enum(d, "run_if", RunIf),
             run_job_task=_from_dict(d, "run_job_task", RunJobTask),
@@ -7469,6 +7809,78 @@ class SubmitTask:
         )
 
 
+@dataclass
+class Subscription:
+    custom_subject: Optional[str] = None
+    """Optional: Allows users to specify a custom subject line on the email sent to subscribers."""
+
+    paused: Optional[bool] = None
+    """When true, the subscription will not send emails."""
+
+    subscribers: Optional[List[SubscriptionSubscriber]] = None
+
+    def as_dict(self) -> dict:
+        """Serializes the Subscription into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.custom_subject is not None:
+            body["custom_subject"] = self.custom_subject
+        if self.paused is not None:
+            body["paused"] = self.paused
+        if self.subscribers:
+            body["subscribers"] = [v.as_dict() for v in self.subscribers]
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the Subscription into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.custom_subject is not None:
+            body["custom_subject"] = self.custom_subject
+        if self.paused is not None:
+            body["paused"] = self.paused
+        if self.subscribers:
+            body["subscribers"] = self.subscribers
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> Subscription:
+        """Deserializes the Subscription from a dictionary."""
+        return cls(
+            custom_subject=d.get("custom_subject", None),
+            paused=d.get("paused", None),
+            subscribers=_repeated_dict(d, "subscribers", SubscriptionSubscriber),
+        )
+
+
+@dataclass
+class SubscriptionSubscriber:
+    destination_id: Optional[str] = None
+
+    user_name: Optional[str] = None
+
+    def as_dict(self) -> dict:
+        """Serializes the SubscriptionSubscriber into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.destination_id is not None:
+            body["destination_id"] = self.destination_id
+        if self.user_name is not None:
+            body["user_name"] = self.user_name
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the SubscriptionSubscriber into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.destination_id is not None:
+            body["destination_id"] = self.destination_id
+        if self.user_name is not None:
+            body["user_name"] = self.user_name
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> SubscriptionSubscriber:
+        """Deserializes the SubscriptionSubscriber from a dictionary."""
+        return cls(destination_id=d.get("destination_id", None), user_name=d.get("user_name", None))
+
+
 @dataclass
 class TableUpdateTriggerConfiguration:
     condition: Optional[Condition] = None
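These subscription types plug into the DashboardTask shown earlier. A minimal sketch with placeholder values:

from databricks.sdk.service.jobs import (
    DashboardTask,
    Subscription,
    SubscriptionSubscriber,
)

task = DashboardTask(
    dashboard_id="my-dashboard-id",  # placeholder
    warehouse_id="my-warehouse-id",  # placeholder
    subscription=Subscription(
        custom_subject="Weekly sales snapshot",
        paused=False,
        subscribers=[SubscriptionSubscriber(user_name="someone@example.com")],
    ),
)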
@@ -7541,6 +7953,9 @@ class Task:
     `condition_task` field is present. The condition task does not require a cluster to execute and
     does not support retries or notifications."""
 
+    dashboard_task: Optional[DashboardTask] = None
+    """The task runs a DashboardTask when the `dashboard_task` field is present."""
+
     dbt_task: Optional[DbtTask] = None
     """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
     requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
@@ -7575,7 +7990,6 @@ class Task:
     present."""
 
     gen_ai_compute_task: Optional[GenAiComputeTask] = None
-    """Next field: 9"""
 
     health: Optional[JobsHealthRules] = None
     """An optional set of health rules that can be defined for this job."""
@@ -7612,6 +8026,9 @@ class Task:
     """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
     configured to use triggered more are supported."""
 
+    power_bi_task: Optional[PowerBiTask] = None
+    """The task triggers a Power BI semantic model update when the `power_bi_task` field is present."""
+
     python_wheel_task: Optional[PythonWheelTask] = None
     """The task runs a Python wheel when the `python_wheel_task` field is present."""
 
@@ -7672,6 +8089,8 @@ class Task:
             body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task.as_dict()
         if self.condition_task:
             body["condition_task"] = self.condition_task.as_dict()
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task.as_dict()
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task.as_dict()
         if self.depends_on:
@@ -7708,6 +8127,8 @@ class Task:
             body["notification_settings"] = self.notification_settings.as_dict()
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task.as_dict()
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task.as_dict()
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task.as_dict()
         if self.retry_on_timeout is not None:
@@ -7739,6 +8160,8 @@ class Task:
             body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task
         if self.condition_task:
             body["condition_task"] = self.condition_task
+        if self.dashboard_task:
+            body["dashboard_task"] = self.dashboard_task
         if self.dbt_task:
             body["dbt_task"] = self.dbt_task
         if self.depends_on:
@@ -7775,6 +8198,8 @@ class Task:
             body["notification_settings"] = self.notification_settings
         if self.pipeline_task:
             body["pipeline_task"] = self.pipeline_task
+        if self.power_bi_task:
+            body["power_bi_task"] = self.power_bi_task
         if self.python_wheel_task:
             body["python_wheel_task"] = self.python_wheel_task
         if self.retry_on_timeout is not None:
@@ -7805,6 +8230,7 @@ class Task:
         return cls(
             clean_rooms_notebook_task=_from_dict(d, "clean_rooms_notebook_task", CleanRoomsNotebookTask),
             condition_task=_from_dict(d, "condition_task", ConditionTask),
+            dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
             dbt_task=_from_dict(d, "dbt_task", DbtTask),
             depends_on=_repeated_dict(d, "depends_on", TaskDependency),
             description=d.get("description", None),
@@ -7823,6 +8249,7 @@ class Task:
             notebook_task=_from_dict(d, "notebook_task", NotebookTask),
             notification_settings=_from_dict(d, "notification_settings", TaskNotificationSettings),
             pipeline_task=_from_dict(d, "pipeline_task", PipelineTask),
+            power_bi_task=_from_dict(d, "power_bi_task", PowerBiTask),
             python_wheel_task=_from_dict(d, "python_wheel_task", PythonWheelTask),
             retry_on_timeout=d.get("retry_on_timeout", None),
             run_if=_enum(d, "run_if", RunIf),
@@ -8487,6 +8914,30 @@ class WebhookNotifications:
         )
 
 
+@dataclass
+class WidgetErrorDetail:
+    message: Optional[str] = None
+
+    def as_dict(self) -> dict:
+        """Serializes the WidgetErrorDetail into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.message is not None:
+            body["message"] = self.message
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the WidgetErrorDetail into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.message is not None:
+            body["message"] = self.message
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> WidgetErrorDetail:
+        """Deserializes the WidgetErrorDetail from a dictionary."""
+        return cls(message=d.get("message", None))
+
+
 class JobsAPI:
     """The Jobs API allows you to create, edit, and delete jobs.
 
@@ -8672,7 +9123,6 @@ class JobsAPI:
         :param job_clusters: List[:class:`JobCluster`] (optional)
           A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries
           cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
-          If more than 100 job clusters are available, you can paginate through them using :method:jobs/get.
         :param max_concurrent_runs: int (optional)
           An optional maximum allowed number of concurrent runs of the job. Set this value if you want to be
          able to execute multiple runs of the same job concurrently. This is useful for example if you
@@ -8690,8 +9140,12 @@ class JobsAPI:
         :param parameters: List[:class:`JobParameterDefinition`] (optional)
           Job-level parameter definitions
         :param performance_target: :class:`PerformanceTarget` (optional)
-          PerformanceTarget defines how performant or cost efficient the execution of run on serverless should
-          be.
+          The performance mode on a serverless job. The performance target determines the level of compute
+          performance or cost-efficiency for the run.
+
+          * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`:
+          Prioritizes fast startup and execution times through rapid scaling and optimized cluster
+          performance.
         :param queue: :class:`QueueSettings` (optional)
           The queue settings of the job.
         :param run_as: :class:`JobRunAs` (optional)
@@ -8707,9 +9161,11 @@ class JobsAPI:
           clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added
          to the job.
         :param tasks: List[:class:`Task`] (optional)
-          A list of task specifications to be executed by this job. If more than 100 tasks are available, you
-          can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root
-          to determine if more results are available.
+          A list of task specifications to be executed by this job. It supports up to 1000 elements in write
+          endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read
+          endpoints return only 100 tasks. If more than 100 tasks are available, you can paginate through them
+          using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more
+          results are available.
         :param timeout_seconds: int (optional)
           An optional timeout applied to each run of this job. A value of `0` means no timeout.
         :param trigger: :class:`TriggerSettings` (optional)
@@ -8848,16 +9304,18 @@ class JobsAPI:
 
         Retrieves the details for a single job.
 
-        In Jobs API 2.2, requests for a single job support pagination of `tasks` and `job_clusters` when
-        either exceeds 100 elements. Use the `next_page_token` field to check for more results and pass its
-        value as the `page_token` in subsequent requests. Arrays with fewer than 100 elements in a page will
-        be empty on later pages.
+        Large arrays in the results will be paginated when they exceed 100 elements. A request for a single
+        job will return all properties for that job, and the first 100 elements of array properties (`tasks`,
+        `job_clusters`, `environments` and `parameters`). Use the `next_page_token` field to check for more
+        results and pass its value as the `page_token` in subsequent requests. If any array properties have
+        more than 100 elements, additional results will be returned on subsequent requests. Arrays without
+        additional results will be empty on later pages.
 
         :param job_id: int
           The canonical identifier of the job to retrieve information about. This field is required.
         :param page_token: str (optional)
-          Use `next_page_token` returned from the previous GetJob to request the next page of the job's
-          sub-resources.
+          Use `next_page_token` returned from the previous GetJob response to request the next page of the
+          job's array properties.
 
         :returns: :class:`Job`
         """
@@ -8922,10 +9380,12 @@ class JobsAPI:
 
         Retrieves the metadata of a run.
 
-        In Jobs API 2.2, requests for a single job run support pagination of `tasks` and `job_clusters` when
-        either exceeds 100 elements. Use the `next_page_token` field to check for more results and pass its
-        value as the `page_token` in subsequent requests. Arrays with fewer than 100 elements in a page will
-        be empty on later pages.
+        Large arrays in the results will be paginated when they exceed 100 elements. A request for a single
+        run will return all properties for that run, and the first 100 elements of array properties (`tasks`,
+        `job_clusters`, `job_parameters` and `repair_history`). Use the next_page_token field to check for
+        more results and pass its value as the page_token in subsequent requests. If any array properties have
+        more than 100 elements, additional results will be returned on subsequent requests. Arrays without
+        additional results will be empty on later pages.
 
         :param run_id: int
           The canonical identifier of the run for which to retrieve the metadata. This field is required.
@@ -8934,8 +9394,8 @@ class JobsAPI:
         :param include_resolved_values: bool (optional)
           Whether to include resolved parameter values in the response.
         :param page_token: str (optional)
-          Use `next_page_token` returned from the previous GetRun to request the next page of the run's
-          sub-resources.
+          Use `next_page_token` returned from the previous GetRun response to request the next page of the
+          run's array properties.
 
         :returns: :class:`Run`
         """
@@ -8998,8 +9458,8 @@ class JobsAPI:
         Retrieves a list of jobs.
 
         :param expand_tasks: bool (optional)
-          Whether to include task and cluster details in the response. Note that in API 2.2, only the first
-          100 elements will be shown. Use :method:jobs/get to paginate through all tasks and clusters.
+          Whether to include task and cluster details in the response. Note that only the first 100 elements
+          will be shown. Use :method:jobs/get to paginate through all tasks and clusters.
         :param limit: int (optional)
           The number of jobs to return. This value must be greater than 0 and less or equal to 100. The
           default value is 20.
@@ -9065,8 +9525,8 @@ class JobsAPI:
           If completed_only is `true`, only completed runs are included in the results; otherwise, lists both
          active and completed runs. This field cannot be `true` when active_only is `true`.
         :param expand_tasks: bool (optional)
-          Whether to include task and cluster details in the response. Note that in API 2.2, only the first
-          100 elements will be shown. Use :method:jobs/getrun to paginate through all tasks and clusters.
+          Whether to include task and cluster details in the response. Note that only the first 100 elements
+          will be shown. Use :method:jobs/getrun to paginate through all tasks and clusters.
         :param job_id: int (optional)
          The job for which to list runs. If omitted, the Jobs service lists runs from all jobs.
         :param limit: int (optional)
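Since list responses cap task details at 100 elements, a sketch combining `expand_tasks` with the per-run pagination shown above; the job ID is a placeholder:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

for run in w.jobs.list_runs(job_id=123, expand_tasks=True):
    # Each listed run carries at most the first 100 tasks; fall back to
    # jobs/getrun pagination when has_more is set.
    if run.has_more:
        print(run.run_id, "has additional tasks beyond the first page")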
@@ -9408,9 +9868,13 @@ class JobsAPI:
           A list of task keys to run inside of the job. If this field is not provided, all tasks in the job
           will be run.
         :param performance_target: :class:`PerformanceTarget` (optional)
-          PerformanceTarget defines how performant or cost efficient the execution of run on serverless
-          compute should be. For RunNow, this performance target will override the target defined on the
-          job-level.
+          The performance mode on a serverless job. The performance target determines the level of compute
+          performance or cost-efficiency for the run. This field overrides the performance target defined on
+          the job level.
+
+          * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`:
+          Prioritizes fast startup and execution times through rapid scaling and optimized cluster
+          performance.
         :param pipeline_params: :class:`PipelineParams` (optional)
           Controls whether the pipeline should perform a full refresh
         :param python_named_params: Dict[str,str] (optional)