databricks-sdk 0.48.0__py3-none-any.whl → 0.50.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of databricks-sdk might be problematic. Click here for more details.

@@ -21,6 +21,12 @@ from databricks.sdk.service import compute
21
21
  # all definitions in this file are in alphabetical order
22
22
 
23
23
 
24
+ class AuthenticationMethod(Enum):
25
+
26
+ OAUTH = "OAUTH"
27
+ PAT = "PAT"
28
+
29
+
24
30
  @dataclass
25
31
  class BaseJob:
26
32
  created_time: Optional[int] = None
@@ -37,9 +43,9 @@ class BaseJob:
37
43
  on accessible budget policies of the run_as identity on job creation or modification."""
38
44
 
39
45
  has_more: Optional[bool] = None
40
- """Indicates if the job has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
41
- can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2 :method:jobs/list
42
- requests with `expand_tasks=true`."""
46
+ """Indicates if the job has more array properties (`tasks`, `job_clusters`) that are not shown.
47
+ They can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2
48
+ :method:jobs/list requests with `expand_tasks=true`."""
43
49
 
44
50
  job_id: Optional[int] = None
45
51
  """The canonical identifier for this job."""
@@ -125,9 +131,13 @@ class BaseRun:
125
131
  """Description of the run"""
126
132
 
127
133
  effective_performance_target: Optional[PerformanceTarget] = None
128
- """effective_performance_target is the actual performance target used by the run during execution.
129
- effective_performance_target can differ from the client-set performance_target depending on if
130
- the job was eligible to be cost-optimized."""
134
+ """The actual performance target used by the serverless run during execution. This can differ from
135
+ the client-set performance target on the request depending on whether the performance mode is
136
+ supported by the job type.
137
+
138
+ * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
139
+ `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
140
+ optimized cluster performance."""
131
141
 
132
142
  end_time: Optional[int] = None
133
143
  """The time at which this run ended in epoch milliseconds (milliseconds since 1/1/1970 UTC). This
@@ -151,8 +161,8 @@ class BaseRun:
151
161
  are used, `git_source` must be defined on the job."""
152
162
 
153
163
  has_more: Optional[bool] = None
154
- """Indicates if the run has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
155
- can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
164
+ """Indicates if the run has more array properties (`tasks`, `job_clusters`) that are not shown.
165
+ They can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
156
166
  :method:jobs/listruns requests with `expand_tasks=true`."""
157
167
 
158
168
  job_clusters: Optional[List[JobCluster]] = None
@@ -793,8 +803,6 @@ class ClusterSpec:
793
803
 
794
804
  @dataclass
795
805
  class ComputeConfig:
796
- """Next field: 4"""
797
-
798
806
  num_gpus: int
799
807
  """Number of GPUs."""
800
808
 
@@ -992,8 +1000,7 @@ class CreateJob:
992
1000
  job_clusters: Optional[List[JobCluster]] = None
993
1001
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
994
1002
  Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
995
- task settings. If more than 100 job clusters are available, you can paginate through them using
996
- :method:jobs/get."""
1003
+ task settings."""
997
1004
 
998
1005
  max_concurrent_runs: Optional[int] = None
999
1006
  """An optional maximum allowed number of concurrent runs of the job. Set this value if you want to
@@ -1016,8 +1023,12 @@ class CreateJob:
1016
1023
  """Job-level parameter definitions"""
1017
1024
 
1018
1025
  performance_target: Optional[PerformanceTarget] = None
1019
- """PerformanceTarget defines how performant or cost efficient the execution of run on serverless
1020
- should be."""
1026
+ """The performance mode on a serverless job. The performance target determines the level of compute
1027
+ performance or cost-efficiency for the run.
1028
+
1029
+ * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
1030
+ `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
1031
+ optimized cluster performance."""
1021
1032
 
1022
1033
  queue: Optional[QueueSettings] = None
1023
1034
  """The queue settings of the job."""
@@ -1038,9 +1049,11 @@ class CreateJob:
1038
1049
  be added to the job."""
1039
1050
 
1040
1051
  tasks: Optional[List[Task]] = None
1041
- """A list of task specifications to be executed by this job. If more than 100 tasks are available,
1042
- you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the
1043
- object root to determine if more results are available."""
1052
+ """A list of task specifications to be executed by this job. It supports up to 1000 elements in
1053
+ write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update,
1054
+ :method:jobs/submit). Read endpoints return only 100 tasks. If more than 100 tasks are
1055
+ available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field
1056
+ at the object root to determine if more results are available."""
1044
1057
 
1045
1058
  timeout_seconds: Optional[int] = None
1046
1059
  """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
@@ -1271,6 +1284,107 @@ class CronSchedule:
1271
1284
  )
1272
1285
 
1273
1286
 
1287
+ @dataclass
1288
+ class DashboardPageSnapshot:
1289
+ page_display_name: Optional[str] = None
1290
+
1291
+ widget_error_details: Optional[List[WidgetErrorDetail]] = None
1292
+
1293
+ def as_dict(self) -> dict:
1294
+ """Serializes the DashboardPageSnapshot into a dictionary suitable for use as a JSON request body."""
1295
+ body = {}
1296
+ if self.page_display_name is not None:
1297
+ body["page_display_name"] = self.page_display_name
1298
+ if self.widget_error_details:
1299
+ body["widget_error_details"] = [v.as_dict() for v in self.widget_error_details]
1300
+ return body
1301
+
1302
+ def as_shallow_dict(self) -> dict:
1303
+ """Serializes the DashboardPageSnapshot into a shallow dictionary of its immediate attributes."""
1304
+ body = {}
1305
+ if self.page_display_name is not None:
1306
+ body["page_display_name"] = self.page_display_name
1307
+ if self.widget_error_details:
1308
+ body["widget_error_details"] = self.widget_error_details
1309
+ return body
1310
+
1311
+ @classmethod
1312
+ def from_dict(cls, d: Dict[str, Any]) -> DashboardPageSnapshot:
1313
+ """Deserializes the DashboardPageSnapshot from a dictionary."""
1314
+ return cls(
1315
+ page_display_name=d.get("page_display_name", None),
1316
+ widget_error_details=_repeated_dict(d, "widget_error_details", WidgetErrorDetail),
1317
+ )
1318
+
1319
+
1320
+ @dataclass
1321
+ class DashboardTask:
1322
+ """Configures the Lakeview Dashboard job task type."""
1323
+
1324
+ dashboard_id: Optional[str] = None
1325
+
1326
+ subscription: Optional[Subscription] = None
1327
+
1328
+ warehouse_id: Optional[str] = None
1329
+ """The warehouse id to execute the dashboard with for the schedule"""
1330
+
1331
+ def as_dict(self) -> dict:
1332
+ """Serializes the DashboardTask into a dictionary suitable for use as a JSON request body."""
1333
+ body = {}
1334
+ if self.dashboard_id is not None:
1335
+ body["dashboard_id"] = self.dashboard_id
1336
+ if self.subscription:
1337
+ body["subscription"] = self.subscription.as_dict()
1338
+ if self.warehouse_id is not None:
1339
+ body["warehouse_id"] = self.warehouse_id
1340
+ return body
1341
+
1342
+ def as_shallow_dict(self) -> dict:
1343
+ """Serializes the DashboardTask into a shallow dictionary of its immediate attributes."""
1344
+ body = {}
1345
+ if self.dashboard_id is not None:
1346
+ body["dashboard_id"] = self.dashboard_id
1347
+ if self.subscription:
1348
+ body["subscription"] = self.subscription
1349
+ if self.warehouse_id is not None:
1350
+ body["warehouse_id"] = self.warehouse_id
1351
+ return body
1352
+
1353
+ @classmethod
1354
+ def from_dict(cls, d: Dict[str, Any]) -> DashboardTask:
1355
+ """Deserializes the DashboardTask from a dictionary."""
1356
+ return cls(
1357
+ dashboard_id=d.get("dashboard_id", None),
1358
+ subscription=_from_dict(d, "subscription", Subscription),
1359
+ warehouse_id=d.get("warehouse_id", None),
1360
+ )
1361
+
1362
+
1363
+ @dataclass
1364
+ class DashboardTaskOutput:
1365
+ page_snapshots: Optional[List[DashboardPageSnapshot]] = None
1366
+ """Should only be populated for manual PDF download jobs."""
1367
+
1368
+ def as_dict(self) -> dict:
1369
+ """Serializes the DashboardTaskOutput into a dictionary suitable for use as a JSON request body."""
1370
+ body = {}
1371
+ if self.page_snapshots:
1372
+ body["page_snapshots"] = [v.as_dict() for v in self.page_snapshots]
1373
+ return body
1374
+
1375
+ def as_shallow_dict(self) -> dict:
1376
+ """Serializes the DashboardTaskOutput into a shallow dictionary of its immediate attributes."""
1377
+ body = {}
1378
+ if self.page_snapshots:
1379
+ body["page_snapshots"] = self.page_snapshots
1380
+ return body
1381
+
1382
+ @classmethod
1383
+ def from_dict(cls, d: Dict[str, Any]) -> DashboardTaskOutput:
1384
+ """Deserializes the DashboardTaskOutput from a dictionary."""
1385
+ return cls(page_snapshots=_repeated_dict(d, "page_snapshots", DashboardPageSnapshot))
1386
+
1387
+
1274
1388
  @dataclass
1275
1389
  class DbtOutput:
1276
1390
  artifacts_headers: Optional[Dict[str, str]] = None
@@ -1881,8 +1995,6 @@ class Format(Enum):
1881
1995
 
1882
1996
  @dataclass
1883
1997
  class GenAiComputeTask:
1884
- """Next field: 9"""
1885
-
1886
1998
  dl_runtime_image: str
1887
1999
  """Runtime image"""
1888
2000
 
@@ -1890,7 +2002,6 @@ class GenAiComputeTask:
1890
2002
  """Command launcher to run the actual script, e.g. bash, python etc."""
1891
2003
 
1892
2004
  compute: Optional[ComputeConfig] = None
1893
- """Next field: 4"""
1894
2005
 
1895
2006
  mlflow_experiment_name: Optional[str] = None
1896
2007
  """Optional string containing the name of the MLflow experiment to log the run to. If name is not
@@ -2183,15 +2294,15 @@ class Job:
2183
2294
  on accessible budget policies of the run_as identity on job creation or modification."""
2184
2295
 
2185
2296
  has_more: Optional[bool] = None
2186
- """Indicates if the job has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
2187
- can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2 :method:jobs/list
2188
- requests with `expand_tasks=true`."""
2297
+ """Indicates if the job has more array properties (`tasks`, `job_clusters`) that are not shown.
2298
+ They can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2
2299
+ :method:jobs/list requests with `expand_tasks=true`."""
2189
2300
 
2190
2301
  job_id: Optional[int] = None
2191
2302
  """The canonical identifier for this job."""
2192
2303
 
2193
2304
  next_page_token: Optional[str] = None
2194
- """A token that can be used to list the next page of sub-resources."""
2305
+ """A token that can be used to list the next page of array properties."""
2195
2306
 
2196
2307
  run_as_user_name: Optional[str] = None
2197
2308
  """The email of an active workspace user or the application ID of a service principal that the job
@@ -2977,8 +3088,7 @@ class JobSettings:
2977
3088
  job_clusters: Optional[List[JobCluster]] = None
2978
3089
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
2979
3090
  Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
2980
- task settings. If more than 100 job clusters are available, you can paginate through them using
2981
- :method:jobs/get."""
3091
+ task settings."""
2982
3092
 
2983
3093
  max_concurrent_runs: Optional[int] = None
2984
3094
  """An optional maximum allowed number of concurrent runs of the job. Set this value if you want to
@@ -3001,8 +3111,12 @@ class JobSettings:
3001
3111
  """Job-level parameter definitions"""
3002
3112
 
3003
3113
  performance_target: Optional[PerformanceTarget] = None
3004
- """PerformanceTarget defines how performant or cost efficient the execution of run on serverless
3005
- should be."""
3114
+ """The performance mode on a serverless job. The performance target determines the level of compute
3115
+ performance or cost-efficiency for the run.
3116
+
3117
+ * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
3118
+ `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
3119
+ optimized cluster performance."""
3006
3120
 
3007
3121
  queue: Optional[QueueSettings] = None
3008
3122
  """The queue settings of the job."""
@@ -3023,9 +3137,11 @@ class JobSettings:
3023
3137
  be added to the job."""
3024
3138
 
3025
3139
  tasks: Optional[List[Task]] = None
3026
- """A list of task specifications to be executed by this job. If more than 100 tasks are available,
3027
- you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the
3028
- object root to determine if more results are available."""
3140
+ """A list of task specifications to be executed by this job. It supports up to 1000 elements in
3141
+ write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update,
3142
+ :method:jobs/submit). Read endpoints return only 100 tasks. If more than 100 tasks are
3143
+ available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field
3144
+ at the object root to determine if more results are available."""
3029
3145
 
3030
3146
  timeout_seconds: Optional[int] = None
3031
3147
  """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
@@ -3659,8 +3775,8 @@ class PerformanceTarget(Enum):
3659
3775
  on serverless compute should be. The performance mode on the job or pipeline should map to a
3660
3776
  performance setting that is passed to Cluster Manager (see cluster-common PerformanceTarget)."""
3661
3777
 
3662
- COST_OPTIMIZED = "COST_OPTIMIZED"
3663
3778
  PERFORMANCE_OPTIMIZED = "PERFORMANCE_OPTIMIZED"
3779
+ STANDARD = "STANDARD"
3664
3780
 
3665
3781
 
3666
3782
  @dataclass
@@ -3759,6 +3875,175 @@ class PipelineTask:
3759
3875
  return cls(full_refresh=d.get("full_refresh", None), pipeline_id=d.get("pipeline_id", None))
3760
3876
 
3761
3877
 
3878
+ @dataclass
3879
+ class PowerBiModel:
3880
+ authentication_method: Optional[AuthenticationMethod] = None
3881
+ """How the published Power BI model authenticates to Databricks"""
3882
+
3883
+ model_name: Optional[str] = None
3884
+ """The name of the Power BI model"""
3885
+
3886
+ overwrite_existing: Optional[bool] = None
3887
+ """Whether to overwrite existing Power BI models"""
3888
+
3889
+ storage_mode: Optional[StorageMode] = None
3890
+ """The default storage mode of the Power BI model"""
3891
+
3892
+ workspace_name: Optional[str] = None
3893
+ """The name of the Power BI workspace of the model"""
3894
+
3895
+ def as_dict(self) -> dict:
3896
+ """Serializes the PowerBiModel into a dictionary suitable for use as a JSON request body."""
3897
+ body = {}
3898
+ if self.authentication_method is not None:
3899
+ body["authentication_method"] = self.authentication_method.value
3900
+ if self.model_name is not None:
3901
+ body["model_name"] = self.model_name
3902
+ if self.overwrite_existing is not None:
3903
+ body["overwrite_existing"] = self.overwrite_existing
3904
+ if self.storage_mode is not None:
3905
+ body["storage_mode"] = self.storage_mode.value
3906
+ if self.workspace_name is not None:
3907
+ body["workspace_name"] = self.workspace_name
3908
+ return body
3909
+
3910
+ def as_shallow_dict(self) -> dict:
3911
+ """Serializes the PowerBiModel into a shallow dictionary of its immediate attributes."""
3912
+ body = {}
3913
+ if self.authentication_method is not None:
3914
+ body["authentication_method"] = self.authentication_method
3915
+ if self.model_name is not None:
3916
+ body["model_name"] = self.model_name
3917
+ if self.overwrite_existing is not None:
3918
+ body["overwrite_existing"] = self.overwrite_existing
3919
+ if self.storage_mode is not None:
3920
+ body["storage_mode"] = self.storage_mode
3921
+ if self.workspace_name is not None:
3922
+ body["workspace_name"] = self.workspace_name
3923
+ return body
3924
+
3925
+ @classmethod
3926
+ def from_dict(cls, d: Dict[str, Any]) -> PowerBiModel:
3927
+ """Deserializes the PowerBiModel from a dictionary."""
3928
+ return cls(
3929
+ authentication_method=_enum(d, "authentication_method", AuthenticationMethod),
3930
+ model_name=d.get("model_name", None),
3931
+ overwrite_existing=d.get("overwrite_existing", None),
3932
+ storage_mode=_enum(d, "storage_mode", StorageMode),
3933
+ workspace_name=d.get("workspace_name", None),
3934
+ )
3935
+
3936
+
3937
+ @dataclass
3938
+ class PowerBiTable:
3939
+ catalog: Optional[str] = None
3940
+ """The catalog name in Databricks"""
3941
+
3942
+ name: Optional[str] = None
3943
+ """The table name in Databricks"""
3944
+
3945
+ schema: Optional[str] = None
3946
+ """The schema name in Databricks"""
3947
+
3948
+ storage_mode: Optional[StorageMode] = None
3949
+ """The Power BI storage mode of the table"""
3950
+
3951
+ def as_dict(self) -> dict:
3952
+ """Serializes the PowerBiTable into a dictionary suitable for use as a JSON request body."""
3953
+ body = {}
3954
+ if self.catalog is not None:
3955
+ body["catalog"] = self.catalog
3956
+ if self.name is not None:
3957
+ body["name"] = self.name
3958
+ if self.schema is not None:
3959
+ body["schema"] = self.schema
3960
+ if self.storage_mode is not None:
3961
+ body["storage_mode"] = self.storage_mode.value
3962
+ return body
3963
+
3964
+ def as_shallow_dict(self) -> dict:
3965
+ """Serializes the PowerBiTable into a shallow dictionary of its immediate attributes."""
3966
+ body = {}
3967
+ if self.catalog is not None:
3968
+ body["catalog"] = self.catalog
3969
+ if self.name is not None:
3970
+ body["name"] = self.name
3971
+ if self.schema is not None:
3972
+ body["schema"] = self.schema
3973
+ if self.storage_mode is not None:
3974
+ body["storage_mode"] = self.storage_mode
3975
+ return body
3976
+
3977
+ @classmethod
3978
+ def from_dict(cls, d: Dict[str, Any]) -> PowerBiTable:
3979
+ """Deserializes the PowerBiTable from a dictionary."""
3980
+ return cls(
3981
+ catalog=d.get("catalog", None),
3982
+ name=d.get("name", None),
3983
+ schema=d.get("schema", None),
3984
+ storage_mode=_enum(d, "storage_mode", StorageMode),
3985
+ )
3986
+
3987
+
3988
+ @dataclass
3989
+ class PowerBiTask:
3990
+ connection_resource_name: Optional[str] = None
3991
+ """The resource name of the UC connection to authenticate from Databricks to Power BI"""
3992
+
3993
+ power_bi_model: Optional[PowerBiModel] = None
3994
+ """The semantic model to update"""
3995
+
3996
+ refresh_after_update: Optional[bool] = None
3997
+ """Whether the model should be refreshed after the update"""
3998
+
3999
+ tables: Optional[List[PowerBiTable]] = None
4000
+ """The tables to be exported to Power BI"""
4001
+
4002
+ warehouse_id: Optional[str] = None
4003
+ """The SQL warehouse ID to use as the Power BI data source"""
4004
+
4005
+ def as_dict(self) -> dict:
4006
+ """Serializes the PowerBiTask into a dictionary suitable for use as a JSON request body."""
4007
+ body = {}
4008
+ if self.connection_resource_name is not None:
4009
+ body["connection_resource_name"] = self.connection_resource_name
4010
+ if self.power_bi_model:
4011
+ body["power_bi_model"] = self.power_bi_model.as_dict()
4012
+ if self.refresh_after_update is not None:
4013
+ body["refresh_after_update"] = self.refresh_after_update
4014
+ if self.tables:
4015
+ body["tables"] = [v.as_dict() for v in self.tables]
4016
+ if self.warehouse_id is not None:
4017
+ body["warehouse_id"] = self.warehouse_id
4018
+ return body
4019
+
4020
+ def as_shallow_dict(self) -> dict:
4021
+ """Serializes the PowerBiTask into a shallow dictionary of its immediate attributes."""
4022
+ body = {}
4023
+ if self.connection_resource_name is not None:
4024
+ body["connection_resource_name"] = self.connection_resource_name
4025
+ if self.power_bi_model:
4026
+ body["power_bi_model"] = self.power_bi_model
4027
+ if self.refresh_after_update is not None:
4028
+ body["refresh_after_update"] = self.refresh_after_update
4029
+ if self.tables:
4030
+ body["tables"] = self.tables
4031
+ if self.warehouse_id is not None:
4032
+ body["warehouse_id"] = self.warehouse_id
4033
+ return body
4034
+
4035
+ @classmethod
4036
+ def from_dict(cls, d: Dict[str, Any]) -> PowerBiTask:
4037
+ """Deserializes the PowerBiTask from a dictionary."""
4038
+ return cls(
4039
+ connection_resource_name=d.get("connection_resource_name", None),
4040
+ power_bi_model=_from_dict(d, "power_bi_model", PowerBiModel),
4041
+ refresh_after_update=d.get("refresh_after_update", None),
4042
+ tables=_repeated_dict(d, "tables", PowerBiTable),
4043
+ warehouse_id=d.get("warehouse_id", None),
4044
+ )
4045
+
4046
+
3762
4047
  @dataclass
3763
4048
  class PythonWheelTask:
3764
4049
  package_name: str
@@ -4541,9 +4826,13 @@ class Run:
4541
4826
  """Description of the run"""
4542
4827
 
4543
4828
  effective_performance_target: Optional[PerformanceTarget] = None
4544
- """effective_performance_target is the actual performance target used by the run during execution.
4545
- effective_performance_target can differ from the client-set performance_target depending on if
4546
- the job was eligible to be cost-optimized."""
4829
+ """The actual performance target used by the serverless run during execution. This can differ from
4830
+ the client-set performance target on the request depending on whether the performance mode is
4831
+ supported by the job type.
4832
+
4833
+ * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
4834
+ `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
4835
+ optimized cluster performance."""
4547
4836
 
4548
4837
  end_time: Optional[int] = None
4549
4838
  """The time at which this run ended in epoch milliseconds (milliseconds since 1/1/1970 UTC). This
@@ -4567,8 +4856,8 @@ class Run:
4567
4856
  are used, `git_source` must be defined on the job."""
4568
4857
 
4569
4858
  has_more: Optional[bool] = None
4570
- """Indicates if the run has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
4571
- can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
4859
+ """Indicates if the run has more array properties (`tasks`, `job_clusters`) that are not shown.
4860
+ They can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
4572
4861
  :method:jobs/listruns requests with `expand_tasks=true`."""
4573
4862
 
4574
4863
  iterations: Optional[List[RunTask]] = None
@@ -4592,7 +4881,7 @@ class Run:
4592
4881
  that the task run belongs to."""
4593
4882
 
4594
4883
  next_page_token: Optional[str] = None
4595
- """A token that can be used to list the next page of sub-resources."""
4884
+ """A token that can be used to list the next page of array properties."""
4596
4885
 
4597
4886
  number_in_job: Optional[int] = None
4598
4887
  """A unique identifier for this job run. This is set to the same value as `run_id`."""
@@ -5265,9 +5554,13 @@ class RunNow:
5265
5554
  job will be run."""
5266
5555
 
5267
5556
  performance_target: Optional[PerformanceTarget] = None
5268
- """PerformanceTarget defines how performant or cost efficient the execution of run on serverless
5269
- compute should be. For RunNow, this performance target will override the target defined on the
5270
- job-level."""
5557
+ """The performance mode on a serverless job. The performance target determines the level of compute
5558
+ performance or cost-efficiency for the run. This field overrides the performance target defined
5559
+ on the job level.
5560
+
5561
+ * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
5562
+ `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
5563
+ optimized cluster performance."""
5271
5564
 
5272
5565
  pipeline_params: Optional[PipelineParams] = None
5273
5566
  """Controls whether the pipeline should perform a full refresh"""
@@ -5442,6 +5735,9 @@ class RunOutput:
5442
5735
  clean_rooms_notebook_output: Optional[CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput] = None
5443
5736
  """The output of a clean rooms notebook task, if available"""
5444
5737
 
5738
+ dashboard_output: Optional[DashboardTaskOutput] = None
5739
+ """The output of a dashboard task, if available"""
5740
+
5445
5741
  dbt_output: Optional[DbtOutput] = None
5446
5742
  """The output of a dbt task, if available."""
5447
5743
 
@@ -5488,6 +5784,8 @@ class RunOutput:
5488
5784
  body = {}
5489
5785
  if self.clean_rooms_notebook_output:
5490
5786
  body["clean_rooms_notebook_output"] = self.clean_rooms_notebook_output.as_dict()
5787
+ if self.dashboard_output:
5788
+ body["dashboard_output"] = self.dashboard_output.as_dict()
5491
5789
  if self.dbt_output:
5492
5790
  body["dbt_output"] = self.dbt_output.as_dict()
5493
5791
  if self.error is not None:
@@ -5515,6 +5813,8 @@ class RunOutput:
5515
5813
  body = {}
5516
5814
  if self.clean_rooms_notebook_output:
5517
5815
  body["clean_rooms_notebook_output"] = self.clean_rooms_notebook_output
5816
+ if self.dashboard_output:
5817
+ body["dashboard_output"] = self.dashboard_output
5518
5818
  if self.dbt_output:
5519
5819
  body["dbt_output"] = self.dbt_output
5520
5820
  if self.error is not None:
@@ -5544,6 +5844,7 @@ class RunOutput:
5544
5844
  clean_rooms_notebook_output=_from_dict(
5545
5845
  d, "clean_rooms_notebook_output", CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput
5546
5846
  ),
5847
+ dashboard_output=_from_dict(d, "dashboard_output", DashboardTaskOutput),
5547
5848
  dbt_output=_from_dict(d, "dbt_output", DbtOutput),
5548
5849
  error=d.get("error", None),
5549
5850
  error_trace=d.get("error_trace", None),
@@ -5859,6 +6160,9 @@ class RunTask:
5859
6160
  `condition_task` field is present. The condition task does not require a cluster to execute and
5860
6161
  does not support retries or notifications."""
5861
6162
 
6163
+ dashboard_task: Optional[DashboardTask] = None
6164
+ """The task runs a DashboardTask when the `dashboard_task` field is present."""
6165
+
5862
6166
  dbt_task: Optional[DbtTask] = None
5863
6167
  """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
5864
6168
  requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
@@ -5872,13 +6176,16 @@ class RunTask:
5872
6176
  """An optional description for this task."""
5873
6177
 
5874
6178
  disabled: Optional[bool] = None
5875
- """Denotes whether or not the task was disabled by the user. Disabled tasks do not execute and are
5876
- immediately skipped as soon as they are unblocked."""
6179
+ """Deprecated, field was never used in production."""
5877
6180
 
5878
6181
  effective_performance_target: Optional[PerformanceTarget] = None
5879
- """effective_performance_target is the actual performance target used by the run during execution.
5880
- effective_performance_target can differ from the client-set performance_target depending on if
5881
- the job was eligible to be cost-optimized."""
6182
+ """The actual performance target used by the serverless run during execution. This can differ from
6183
+ the client-set performance target on the request depending on whether the performance mode is
6184
+ supported by the job type.
6185
+
6186
+ * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
6187
+ `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
6188
+ optimized cluster performance."""
5882
6189
 
5883
6190
  email_notifications: Optional[JobEmailNotifications] = None
5884
6191
  """An optional set of email addresses notified when the task run begins or completes. The default
@@ -5909,7 +6216,6 @@ class RunTask:
5909
6216
  present."""
5910
6217
 
5911
6218
  gen_ai_compute_task: Optional[GenAiComputeTask] = None
5912
- """Next field: 9"""
5913
6219
 
5914
6220
  git_source: Optional[GitSource] = None
5915
6221
  """An optional specification for a remote Git repository containing the source code used by tasks.
@@ -5941,6 +6247,9 @@ class RunTask:
5941
6247
  """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
5942
6248
  configured to use triggered more are supported."""
5943
6249
 
6250
+ power_bi_task: Optional[PowerBiTask] = None
6251
+ """The task triggers a Power BI semantic model update when the `power_bi_task` field is present."""
6252
+
5944
6253
  python_wheel_task: Optional[PythonWheelTask] = None
5945
6254
  """The task runs a Python wheel when the `python_wheel_task` field is present."""
5946
6255
 
@@ -6032,6 +6341,8 @@ class RunTask:
6032
6341
  body["cluster_instance"] = self.cluster_instance.as_dict()
6033
6342
  if self.condition_task:
6034
6343
  body["condition_task"] = self.condition_task.as_dict()
6344
+ if self.dashboard_task:
6345
+ body["dashboard_task"] = self.dashboard_task.as_dict()
6035
6346
  if self.dbt_task:
6036
6347
  body["dbt_task"] = self.dbt_task.as_dict()
6037
6348
  if self.depends_on:
@@ -6070,6 +6381,8 @@ class RunTask:
6070
6381
  body["notification_settings"] = self.notification_settings.as_dict()
6071
6382
  if self.pipeline_task:
6072
6383
  body["pipeline_task"] = self.pipeline_task.as_dict()
6384
+ if self.power_bi_task:
6385
+ body["power_bi_task"] = self.power_bi_task.as_dict()
6073
6386
  if self.python_wheel_task:
6074
6387
  body["python_wheel_task"] = self.python_wheel_task.as_dict()
6075
6388
  if self.queue_duration is not None:
@@ -6123,6 +6436,8 @@ class RunTask:
6123
6436
  body["cluster_instance"] = self.cluster_instance
6124
6437
  if self.condition_task:
6125
6438
  body["condition_task"] = self.condition_task
6439
+ if self.dashboard_task:
6440
+ body["dashboard_task"] = self.dashboard_task
6126
6441
  if self.dbt_task:
6127
6442
  body["dbt_task"] = self.dbt_task
6128
6443
  if self.depends_on:
@@ -6161,6 +6476,8 @@ class RunTask:
6161
6476
  body["notification_settings"] = self.notification_settings
6162
6477
  if self.pipeline_task:
6163
6478
  body["pipeline_task"] = self.pipeline_task
6479
+ if self.power_bi_task:
6480
+ body["power_bi_task"] = self.power_bi_task
6164
6481
  if self.python_wheel_task:
6165
6482
  body["python_wheel_task"] = self.python_wheel_task
6166
6483
  if self.queue_duration is not None:
@@ -6210,6 +6527,7 @@ class RunTask:
6210
6527
  cleanup_duration=d.get("cleanup_duration", None),
6211
6528
  cluster_instance=_from_dict(d, "cluster_instance", ClusterInstance),
6212
6529
  condition_task=_from_dict(d, "condition_task", RunConditionTask),
6530
+ dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
6213
6531
  dbt_task=_from_dict(d, "dbt_task", DbtTask),
6214
6532
  depends_on=_repeated_dict(d, "depends_on", TaskDependency),
6215
6533
  description=d.get("description", None),
@@ -6229,6 +6547,7 @@ class RunTask:
6229
6547
  notebook_task=_from_dict(d, "notebook_task", NotebookTask),
6230
6548
  notification_settings=_from_dict(d, "notification_settings", TaskNotificationSettings),
6231
6549
  pipeline_task=_from_dict(d, "pipeline_task", PipelineTask),
6550
+ power_bi_task=_from_dict(d, "power_bi_task", PowerBiTask),
6232
6551
  python_wheel_task=_from_dict(d, "python_wheel_task", PythonWheelTask),
6233
6552
  queue_duration=d.get("queue_duration", None),
6234
6553
  resolved_values=_from_dict(d, "resolved_values", ResolvedValues),
@@ -7027,6 +7346,13 @@ class SqlTaskSubscription:
7027
7346
  return cls(destination_id=d.get("destination_id", None), user_name=d.get("user_name", None))
7028
7347
 
7029
7348
 
7349
class StorageMode(Enum):
    """Storage mode options (DIRECT_QUERY / DUAL / IMPORT).

    NOTE(review): this enum is introduced alongside the Power BI task types in
    this release, so these presumably correspond to Power BI semantic-model
    table storage modes — confirm against the service API specification.
    """

    DIRECT_QUERY = "DIRECT_QUERY"
    DUAL = "DUAL"
    IMPORT = "IMPORT"
7354
+
7355
+
7030
7356
  @dataclass
7031
7357
  class SubmitRun:
7032
7358
  access_control_list: Optional[List[JobAccessControlRequest]] = None
@@ -7222,6 +7548,9 @@ class SubmitTask:
7222
7548
  `condition_task` field is present. The condition task does not require a cluster to execute and
7223
7549
  does not support retries or notifications."""
7224
7550
 
7551
+ dashboard_task: Optional[DashboardTask] = None
7552
+ """The task runs a DashboardTask when the `dashboard_task` field is present."""
7553
+
7225
7554
  dbt_task: Optional[DbtTask] = None
7226
7555
  """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
7227
7556
  requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
@@ -7252,7 +7581,6 @@ class SubmitTask:
7252
7581
  present."""
7253
7582
 
7254
7583
  gen_ai_compute_task: Optional[GenAiComputeTask] = None
7255
- """Next field: 9"""
7256
7584
 
7257
7585
  health: Optional[JobsHealthRules] = None
7258
7586
  """An optional set of health rules that can be defined for this job."""
@@ -7275,6 +7603,9 @@ class SubmitTask:
7275
7603
  """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
7276
7604
  configured to use triggered mode are supported."""
7277
7605
 
7606
+ power_bi_task: Optional[PowerBiTask] = None
7607
+ """The task triggers a Power BI semantic model update when the `power_bi_task` field is present."""
7608
+
7278
7609
  python_wheel_task: Optional[PythonWheelTask] = None
7279
7610
  """The task runs a Python wheel when the `python_wheel_task` field is present."""
7280
7611
 
@@ -7328,6 +7659,8 @@ class SubmitTask:
7328
7659
  body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task.as_dict()
7329
7660
  if self.condition_task:
7330
7661
  body["condition_task"] = self.condition_task.as_dict()
7662
+ if self.dashboard_task:
7663
+ body["dashboard_task"] = self.dashboard_task.as_dict()
7331
7664
  if self.dbt_task:
7332
7665
  body["dbt_task"] = self.dbt_task.as_dict()
7333
7666
  if self.depends_on:
@@ -7356,6 +7689,8 @@ class SubmitTask:
7356
7689
  body["notification_settings"] = self.notification_settings.as_dict()
7357
7690
  if self.pipeline_task:
7358
7691
  body["pipeline_task"] = self.pipeline_task.as_dict()
7692
+ if self.power_bi_task:
7693
+ body["power_bi_task"] = self.power_bi_task.as_dict()
7359
7694
  if self.python_wheel_task:
7360
7695
  body["python_wheel_task"] = self.python_wheel_task.as_dict()
7361
7696
  if self.run_if is not None:
@@ -7385,6 +7720,8 @@ class SubmitTask:
7385
7720
  body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task
7386
7721
  if self.condition_task:
7387
7722
  body["condition_task"] = self.condition_task
7723
+ if self.dashboard_task:
7724
+ body["dashboard_task"] = self.dashboard_task
7388
7725
  if self.dbt_task:
7389
7726
  body["dbt_task"] = self.dbt_task
7390
7727
  if self.depends_on:
@@ -7413,6 +7750,8 @@ class SubmitTask:
7413
7750
  body["notification_settings"] = self.notification_settings
7414
7751
  if self.pipeline_task:
7415
7752
  body["pipeline_task"] = self.pipeline_task
7753
+ if self.power_bi_task:
7754
+ body["power_bi_task"] = self.power_bi_task
7416
7755
  if self.python_wheel_task:
7417
7756
  body["python_wheel_task"] = self.python_wheel_task
7418
7757
  if self.run_if is not None:
@@ -7441,6 +7780,7 @@ class SubmitTask:
7441
7780
  return cls(
7442
7781
  clean_rooms_notebook_task=_from_dict(d, "clean_rooms_notebook_task", CleanRoomsNotebookTask),
7443
7782
  condition_task=_from_dict(d, "condition_task", ConditionTask),
7783
+ dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
7444
7784
  dbt_task=_from_dict(d, "dbt_task", DbtTask),
7445
7785
  depends_on=_repeated_dict(d, "depends_on", TaskDependency),
7446
7786
  description=d.get("description", None),
@@ -7455,6 +7795,7 @@ class SubmitTask:
7455
7795
  notebook_task=_from_dict(d, "notebook_task", NotebookTask),
7456
7796
  notification_settings=_from_dict(d, "notification_settings", TaskNotificationSettings),
7457
7797
  pipeline_task=_from_dict(d, "pipeline_task", PipelineTask),
7798
+ power_bi_task=_from_dict(d, "power_bi_task", PowerBiTask),
7458
7799
  python_wheel_task=_from_dict(d, "python_wheel_task", PythonWheelTask),
7459
7800
  run_if=_enum(d, "run_if", RunIf),
7460
7801
  run_job_task=_from_dict(d, "run_job_task", RunJobTask),
@@ -7468,6 +7809,78 @@ class SubmitTask:
7468
7809
  )
7469
7810
 
7470
7811
 
7812
@dataclass
class Subscription:
    """Email subscription settings for a scheduled task's notifications."""

    # Optional custom subject line for the emails sent to subscribers.
    custom_subject: Optional[str] = None

    # When true, no emails are sent for this subscription.
    paused: Optional[bool] = None

    # Recipients of the subscription emails.
    subscribers: Optional[List[SubscriptionSubscriber]] = None

    def as_dict(self) -> dict:
        """Serializes the Subscription into a dictionary suitable for use as a JSON request body."""
        serialized: dict = {}
        if self.custom_subject is not None:
            serialized["custom_subject"] = self.custom_subject
        if self.paused is not None:
            serialized["paused"] = self.paused
        if self.subscribers:
            serialized["subscribers"] = [subscriber.as_dict() for subscriber in self.subscribers]
        return serialized

    def as_shallow_dict(self) -> dict:
        """Serializes the Subscription into a shallow dictionary of its immediate attributes."""
        serialized: dict = {}
        if self.custom_subject is not None:
            serialized["custom_subject"] = self.custom_subject
        if self.paused is not None:
            serialized["paused"] = self.paused
        if self.subscribers:
            # Shallow: keep the subscriber objects themselves, do not serialize them.
            serialized["subscribers"] = self.subscribers
        return serialized

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> Subscription:
        """Deserializes the Subscription from a dictionary."""
        return cls(
            custom_subject=d.get("custom_subject"),
            paused=d.get("paused"),
            subscribers=_repeated_dict(d, "subscribers", SubscriptionSubscriber),
        )
7852
+
7853
+
7854
@dataclass
class SubscriptionSubscriber:
    """A single recipient of a Subscription's emails."""

    # NOTE(review): a subscriber appears to be addressed either by a
    # destination id or by a workspace user name — confirm which takes
    # precedence when both are set.
    destination_id: Optional[str] = None

    user_name: Optional[str] = None

    def as_dict(self) -> dict:
        """Serializes the SubscriptionSubscriber into a dictionary suitable for use as a JSON request body."""
        serialized: dict = {}
        for key, value in (("destination_id", self.destination_id), ("user_name", self.user_name)):
            if value is not None:
                serialized[key] = value
        return serialized

    def as_shallow_dict(self) -> dict:
        """Serializes the SubscriptionSubscriber into a shallow dictionary of its immediate attributes."""
        serialized: dict = {}
        for key, value in (("destination_id", self.destination_id), ("user_name", self.user_name)):
            if value is not None:
                serialized[key] = value
        return serialized

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> SubscriptionSubscriber:
        """Deserializes the SubscriptionSubscriber from a dictionary."""
        return cls(destination_id=d.get("destination_id"), user_name=d.get("user_name"))
7882
+
7883
+
7471
7884
  @dataclass
7472
7885
  class TableUpdateTriggerConfiguration:
7473
7886
  condition: Optional[Condition] = None
@@ -7540,6 +7953,9 @@ class Task:
7540
7953
  `condition_task` field is present. The condition task does not require a cluster to execute and
7541
7954
  does not support retries or notifications."""
7542
7955
 
7956
+ dashboard_task: Optional[DashboardTask] = None
7957
+ """The task runs a DashboardTask when the `dashboard_task` field is present."""
7958
+
7543
7959
  dbt_task: Optional[DbtTask] = None
7544
7960
  """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
7545
7961
  requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
@@ -7574,7 +7990,6 @@ class Task:
7574
7990
  present."""
7575
7991
 
7576
7992
  gen_ai_compute_task: Optional[GenAiComputeTask] = None
7577
- """Next field: 9"""
7578
7993
 
7579
7994
  health: Optional[JobsHealthRules] = None
7580
7995
  """An optional set of health rules that can be defined for this job."""
@@ -7611,6 +8026,9 @@ class Task:
7611
8026
  """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
7612
8027
  configured to use triggered mode are supported."""
7613
8028
 
8029
+ power_bi_task: Optional[PowerBiTask] = None
8030
+ """The task triggers a Power BI semantic model update when the `power_bi_task` field is present."""
8031
+
7614
8032
  python_wheel_task: Optional[PythonWheelTask] = None
7615
8033
  """The task runs a Python wheel when the `python_wheel_task` field is present."""
7616
8034
 
@@ -7671,6 +8089,8 @@ class Task:
7671
8089
  body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task.as_dict()
7672
8090
  if self.condition_task:
7673
8091
  body["condition_task"] = self.condition_task.as_dict()
8092
+ if self.dashboard_task:
8093
+ body["dashboard_task"] = self.dashboard_task.as_dict()
7674
8094
  if self.dbt_task:
7675
8095
  body["dbt_task"] = self.dbt_task.as_dict()
7676
8096
  if self.depends_on:
@@ -7707,6 +8127,8 @@ class Task:
7707
8127
  body["notification_settings"] = self.notification_settings.as_dict()
7708
8128
  if self.pipeline_task:
7709
8129
  body["pipeline_task"] = self.pipeline_task.as_dict()
8130
+ if self.power_bi_task:
8131
+ body["power_bi_task"] = self.power_bi_task.as_dict()
7710
8132
  if self.python_wheel_task:
7711
8133
  body["python_wheel_task"] = self.python_wheel_task.as_dict()
7712
8134
  if self.retry_on_timeout is not None:
@@ -7738,6 +8160,8 @@ class Task:
7738
8160
  body["clean_rooms_notebook_task"] = self.clean_rooms_notebook_task
7739
8161
  if self.condition_task:
7740
8162
  body["condition_task"] = self.condition_task
8163
+ if self.dashboard_task:
8164
+ body["dashboard_task"] = self.dashboard_task
7741
8165
  if self.dbt_task:
7742
8166
  body["dbt_task"] = self.dbt_task
7743
8167
  if self.depends_on:
@@ -7774,6 +8198,8 @@ class Task:
7774
8198
  body["notification_settings"] = self.notification_settings
7775
8199
  if self.pipeline_task:
7776
8200
  body["pipeline_task"] = self.pipeline_task
8201
+ if self.power_bi_task:
8202
+ body["power_bi_task"] = self.power_bi_task
7777
8203
  if self.python_wheel_task:
7778
8204
  body["python_wheel_task"] = self.python_wheel_task
7779
8205
  if self.retry_on_timeout is not None:
@@ -7804,6 +8230,7 @@ class Task:
7804
8230
  return cls(
7805
8231
  clean_rooms_notebook_task=_from_dict(d, "clean_rooms_notebook_task", CleanRoomsNotebookTask),
7806
8232
  condition_task=_from_dict(d, "condition_task", ConditionTask),
8233
+ dashboard_task=_from_dict(d, "dashboard_task", DashboardTask),
7807
8234
  dbt_task=_from_dict(d, "dbt_task", DbtTask),
7808
8235
  depends_on=_repeated_dict(d, "depends_on", TaskDependency),
7809
8236
  description=d.get("description", None),
@@ -7822,6 +8249,7 @@ class Task:
7822
8249
  notebook_task=_from_dict(d, "notebook_task", NotebookTask),
7823
8250
  notification_settings=_from_dict(d, "notification_settings", TaskNotificationSettings),
7824
8251
  pipeline_task=_from_dict(d, "pipeline_task", PipelineTask),
8252
+ power_bi_task=_from_dict(d, "power_bi_task", PowerBiTask),
7825
8253
  python_wheel_task=_from_dict(d, "python_wheel_task", PythonWheelTask),
7826
8254
  retry_on_timeout=d.get("retry_on_timeout", None),
7827
8255
  run_if=_enum(d, "run_if", RunIf),
@@ -8486,6 +8914,30 @@ class WebhookNotifications:
8486
8914
  )
8487
8915
 
8488
8916
 
8917
@dataclass
class WidgetErrorDetail:
    """Error detail reported for a widget; carries only a message."""

    # Human-readable error message, if any.
    message: Optional[str] = None

    def as_dict(self) -> dict:
        """Serializes the WidgetErrorDetail into a dictionary suitable for use as a JSON request body."""
        serialized: dict = {}
        if self.message is not None:
            serialized["message"] = self.message
        return serialized

    def as_shallow_dict(self) -> dict:
        """Serializes the WidgetErrorDetail into a shallow dictionary of its immediate attributes."""
        serialized: dict = {}
        if self.message is not None:
            serialized["message"] = self.message
        return serialized

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> WidgetErrorDetail:
        """Deserializes the WidgetErrorDetail from a dictionary."""
        return cls(message=d.get("message"))
8939
+
8940
+
8489
8941
  class JobsAPI:
8490
8942
  """The Jobs API allows you to create, edit, and delete jobs.
8491
8943
 
@@ -8671,7 +9123,6 @@ class JobsAPI:
8671
9123
  :param job_clusters: List[:class:`JobCluster`] (optional)
8672
9124
  A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries
8673
9125
  cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
8674
- If more than 100 job clusters are available, you can paginate through them using :method:jobs/get.
8675
9126
  :param max_concurrent_runs: int (optional)
8676
9127
  An optional maximum allowed number of concurrent runs of the job. Set this value if you want to be
8677
9128
  able to execute multiple runs of the same job concurrently. This is useful for example if you
@@ -8689,8 +9140,12 @@ class JobsAPI:
8689
9140
  :param parameters: List[:class:`JobParameterDefinition`] (optional)
8690
9141
  Job-level parameter definitions
8691
9142
  :param performance_target: :class:`PerformanceTarget` (optional)
8692
- PerformanceTarget defines how performant or cost efficient the execution of run on serverless should
8693
- be.
9143
+ The performance mode on a serverless job. The performance target determines the level of compute
9144
+ performance or cost-efficiency for the run.
9145
+
9146
+ * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`:
9147
+ Prioritizes fast startup and execution times through rapid scaling and optimized cluster
9148
+ performance.
8694
9149
  :param queue: :class:`QueueSettings` (optional)
8695
9150
  The queue settings of the job.
8696
9151
  :param run_as: :class:`JobRunAs` (optional)
@@ -8706,9 +9161,11 @@ class JobsAPI:
8706
9161
  clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added
8707
9162
  to the job.
8708
9163
  :param tasks: List[:class:`Task`] (optional)
8709
- A list of task specifications to be executed by this job. If more than 100 tasks are available, you
8710
- can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root
8711
- to determine if more results are available.
9164
+ A list of task specifications to be executed by this job. It supports up to 1000 elements in write
9165
+ endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read
9166
+ endpoints return only 100 tasks. If more than 100 tasks are available, you can paginate through them
9167
+ using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more
9168
+ results are available.
8712
9169
  :param timeout_seconds: int (optional)
8713
9170
  An optional timeout applied to each run of this job. A value of `0` means no timeout.
8714
9171
  :param trigger: :class:`TriggerSettings` (optional)
@@ -8847,16 +9304,18 @@ class JobsAPI:
8847
9304
 
8848
9305
  Retrieves the details for a single job.
8849
9306
 
8850
- In Jobs API 2.2, requests for a single job support pagination of `tasks` and `job_clusters` when
8851
- either exceeds 100 elements. Use the `next_page_token` field to check for more results and pass its
8852
- value as the `page_token` in subsequent requests. Arrays with fewer than 100 elements in a page will
8853
- be empty on later pages.
9307
+ Large arrays in the results will be paginated when they exceed 100 elements. A request for a single
9308
+ job will return all properties for that job, and the first 100 elements of array properties (`tasks`,
9309
+ `job_clusters`, `environments` and `parameters`). Use the `next_page_token` field to check for more
9310
+ results and pass its value as the `page_token` in subsequent requests. If any array properties have
9311
+ more than 100 elements, additional results will be returned on subsequent requests. Arrays without
9312
+ additional results will be empty on later pages.
8854
9313
 
8855
9314
  :param job_id: int
8856
9315
  The canonical identifier of the job to retrieve information about. This field is required.
8857
9316
  :param page_token: str (optional)
8858
- Use `next_page_token` returned from the previous GetJob to request the next page of the job's
8859
- sub-resources.
9317
+ Use `next_page_token` returned from the previous GetJob response to request the next page of the
9318
+ job's array properties.
8860
9319
 
8861
9320
  :returns: :class:`Job`
8862
9321
  """
@@ -8921,10 +9380,12 @@ class JobsAPI:
8921
9380
 
8922
9381
  Retrieves the metadata of a run.
8923
9382
 
8924
- In Jobs API 2.2, requests for a single job run support pagination of `tasks` and `job_clusters` when
8925
- either exceeds 100 elements. Use the `next_page_token` field to check for more results and pass its
8926
- value as the `page_token` in subsequent requests. Arrays with fewer than 100 elements in a page will
8927
- be empty on later pages.
9383
+ Large arrays in the results will be paginated when they exceed 100 elements. A request for a single
9384
+ run will return all properties for that run, and the first 100 elements of array properties (`tasks`,
9385
+ `job_clusters`, `job_parameters` and `repair_history`). Use the next_page_token field to check for
9386
+ more results and pass its value as the page_token in subsequent requests. If any array properties have
9387
+ more than 100 elements, additional results will be returned on subsequent requests. Arrays without
9388
+ additional results will be empty on later pages.
8928
9389
 
8929
9390
  :param run_id: int
8930
9391
  The canonical identifier of the run for which to retrieve the metadata. This field is required.
@@ -8933,8 +9394,8 @@ class JobsAPI:
8933
9394
  :param include_resolved_values: bool (optional)
8934
9395
  Whether to include resolved parameter values in the response.
8935
9396
  :param page_token: str (optional)
8936
- Use `next_page_token` returned from the previous GetRun to request the next page of the run's
8937
- sub-resources.
9397
+ Use `next_page_token` returned from the previous GetRun response to request the next page of the
9398
+ run's array properties.
8938
9399
 
8939
9400
  :returns: :class:`Run`
8940
9401
  """
@@ -8997,8 +9458,8 @@ class JobsAPI:
8997
9458
  Retrieves a list of jobs.
8998
9459
 
8999
9460
  :param expand_tasks: bool (optional)
9000
- Whether to include task and cluster details in the response. Note that in API 2.2, only the first
9001
- 100 elements will be shown. Use :method:jobs/get to paginate through all tasks and clusters.
9461
+ Whether to include task and cluster details in the response. Note that only the first 100 elements
9462
+ will be shown. Use :method:jobs/get to paginate through all tasks and clusters.
9002
9463
  :param limit: int (optional)
9003
9464
  The number of jobs to return. This value must be greater than 0 and less or equal to 100. The
9004
9465
  default value is 20.
@@ -9064,8 +9525,8 @@ class JobsAPI:
9064
9525
  If completed_only is `true`, only completed runs are included in the results; otherwise, lists both
9065
9526
  active and completed runs. This field cannot be `true` when active_only is `true`.
9066
9527
  :param expand_tasks: bool (optional)
9067
- Whether to include task and cluster details in the response. Note that in API 2.2, only the first
9068
- 100 elements will be shown. Use :method:jobs/getrun to paginate through all tasks and clusters.
9528
+ Whether to include task and cluster details in the response. Note that only the first 100 elements
9529
+ will be shown. Use :method:jobs/getrun to paginate through all tasks and clusters.
9069
9530
  :param job_id: int (optional)
9070
9531
  The job for which to list runs. If omitted, the Jobs service lists runs from all jobs.
9071
9532
  :param limit: int (optional)
@@ -9407,9 +9868,13 @@ class JobsAPI:
9407
9868
  A list of task keys to run inside of the job. If this field is not provided, all tasks in the job
9408
9869
  will be run.
9409
9870
  :param performance_target: :class:`PerformanceTarget` (optional)
9410
- PerformanceTarget defines how performant or cost efficient the execution of run on serverless
9411
- compute should be. For RunNow, this performance target will override the target defined on the
9412
- job-level.
9871
+ The performance mode on a serverless job. The performance target determines the level of compute
9872
+ performance or cost-efficiency for the run. This field overrides the performance target defined on
9873
+ the job level.
9874
+
9875
+ * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`:
9876
+ Prioritizes fast startup and execution times through rapid scaling and optimized cluster
9877
+ performance.
9413
9878
  :param pipeline_params: :class:`PipelineParams` (optional)
9414
9879
  Controls whether the pipeline should perform a full refresh
9415
9880
  :param python_named_params: Dict[str,str] (optional)