databricks-sdk 0.31.0__py3-none-any.whl → 0.32.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of databricks-sdk might be problematic.

@@ -892,8 +892,8 @@ class PermissionsRequest:
 
     request_object_type: Optional[str] = None
     """The type of the request object. Can be one of the following: alerts, authorization, clusters,
-    cluster-policies, dbsql-dashboards, directories, experiments, files, instance-pools, jobs,
-    notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses."""
+    cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, instance-pools,
+    jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses."""
 
     def as_dict(self) -> dict:
         """Serializes the PermissionsRequest into a dictionary suitable for use as a JSON request body."""
@@ -2600,8 +2600,8 @@ class PermissionsAPI:
 
         :param request_object_type: str
           The type of the request object. Can be one of the following: alerts, authorization, clusters,
-          cluster-policies, dbsql-dashboards, directories, experiments, files, instance-pools, jobs,
-          notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses.
+          cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, instance-pools,
+          jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses.
         :param request_object_id: str
           The id of the request object.
 
@@ -2648,8 +2648,8 @@ class PermissionsAPI:
 
         :param request_object_type: str
           The type of the request object. Can be one of the following: alerts, authorization, clusters,
-          cluster-policies, dbsql-dashboards, directories, experiments, files, instance-pools, jobs,
-          notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses.
+          cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, instance-pools,
+          jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses.
         :param request_object_id: str
           The id of the request object.
         :param access_control_list: List[:class:`AccessControlRequest`] (optional)
@@ -2679,8 +2679,8 @@ class PermissionsAPI:
 
         :param request_object_type: str
           The type of the request object. Can be one of the following: alerts, authorization, clusters,
-          cluster-policies, dbsql-dashboards, directories, experiments, files, instance-pools, jobs,
-          notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses.
+          cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, instance-pools,
+          jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses.
        :param request_object_id: str
          The id of the request object.
        :param access_control_list: List[:class:`AccessControlRequest`] (optional)
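The only functional change in the hunks above is that `dashboards` is now an accepted `request_object_type`. A minimal sketch of exercising it through `WorkspaceClient`; the dashboard ID is a placeholder, not taken from this diff:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()  # credentials resolved from the environment / config profile

dashboard_id = "..."  # placeholder: the object ID of an AI/BI dashboard
perms = w.permissions.get(request_object_type="dashboards",
                          request_object_id=dashboard_id)
print(perms.as_dict())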
@@ -171,7 +171,10 @@ class BaseRun:
     scheduled to run on a new cluster, this is the time the cluster creation call is issued."""
 
     state: Optional[RunState] = None
-    """The current state of the run."""
+    """Deprecated. Please use the `status` field instead."""
+
+    status: Optional[RunStatus] = None
+    """The current status of the run"""
 
     tasks: Optional[List[RunTask]] = None
     """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
@@ -222,6 +225,7 @@ class BaseRun:
         if self.setup_duration is not None: body['setup_duration'] = self.setup_duration
         if self.start_time is not None: body['start_time'] = self.start_time
         if self.state: body['state'] = self.state.as_dict()
+        if self.status: body['status'] = self.status.as_dict()
         if self.tasks: body['tasks'] = [v.as_dict() for v in self.tasks]
         if self.trigger is not None: body['trigger'] = self.trigger.value
         if self.trigger_info: body['trigger_info'] = self.trigger_info.as_dict()
@@ -257,6 +261,7 @@ class BaseRun:
                    setup_duration=d.get('setup_duration', None),
                    start_time=d.get('start_time', None),
                    state=_from_dict(d, 'state', RunState),
+                   status=_from_dict(d, 'status', RunStatus),
                    tasks=_repeated_dict(d, 'tasks', RunTask),
                    trigger=_enum(d, 'trigger', TriggerType),
                    trigger_info=_from_dict(d, 'trigger_info', TriggerInfo))
@@ -2314,6 +2319,44 @@ class PythonWheelTask:
                    parameters=d.get('parameters', None))
 
 
+@dataclass
+class QueueDetails:
+    code: Optional[QueueDetailsCodeCode] = None
+    """The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run was queued due to
+    reaching the workspace limit of active task runs. * `MAX_CONCURRENT_RUNS_REACHED`: The run was
+    queued due to reaching the per-job limit of concurrent job runs. *
+    `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run was queued due to reaching the workspace limit of
+    active run job tasks."""
+
+    message: Optional[str] = None
+    """A descriptive message with the queuing details. This field is unstructured, and its exact format
+    is subject to change."""
+
+    def as_dict(self) -> dict:
+        """Serializes the QueueDetails into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.code is not None: body['code'] = self.code.value
+        if self.message is not None: body['message'] = self.message
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> QueueDetails:
+        """Deserializes the QueueDetails from a dictionary."""
+        return cls(code=_enum(d, 'code', QueueDetailsCodeCode), message=d.get('message', None))
+
+
+class QueueDetailsCodeCode(Enum):
+    """The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run was queued due to
+    reaching the workspace limit of active task runs. * `MAX_CONCURRENT_RUNS_REACHED`: The run was
+    queued due to reaching the per-job limit of concurrent job runs. *
+    `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run was queued due to reaching the workspace limit of
+    active run job tasks."""
+
+    ACTIVE_RUNS_LIMIT_REACHED = 'ACTIVE_RUNS_LIMIT_REACHED'
+    ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED = 'ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED'
+    MAX_CONCURRENT_RUNS_REACHED = 'MAX_CONCURRENT_RUNS_REACHED'
+
+
 @dataclass
 class QueueSettings:
     enabled: bool
@@ -2343,7 +2386,10 @@ class RepairHistoryItem:
     """The start time of the (repaired) run."""
 
     state: Optional[RunState] = None
-    """The current state of the run."""
+    """Deprecated. Please use the `status` field instead."""
+
+    status: Optional[RunStatus] = None
+    """The current status of the run"""
 
     task_run_ids: Optional[List[int]] = None
     """The run IDs of the task runs that ran as part of this repair history item."""
@@ -2358,6 +2404,7 @@ class RepairHistoryItem:
         if self.id is not None: body['id'] = self.id
         if self.start_time is not None: body['start_time'] = self.start_time
         if self.state: body['state'] = self.state.as_dict()
+        if self.status: body['status'] = self.status.as_dict()
         if self.task_run_ids: body['task_run_ids'] = [v for v in self.task_run_ids]
         if self.type is not None: body['type'] = self.type.value
         return body
@@ -2369,6 +2416,7 @@ class RepairHistoryItem:
                    id=d.get('id', None),
                    start_time=d.get('start_time', None),
                    state=_from_dict(d, 'state', RunState),
+                   status=_from_dict(d, 'status', RunStatus),
                    task_run_ids=d.get('task_run_ids', None),
                    type=_enum(d, 'type', RepairHistoryItemType))
 
@@ -2873,7 +2921,10 @@ class Run:
     scheduled to run on a new cluster, this is the time the cluster creation call is issued."""
 
     state: Optional[RunState] = None
-    """The current state of the run."""
+    """Deprecated. Please use the `status` field instead."""
+
+    status: Optional[RunStatus] = None
+    """The current status of the run"""
 
     tasks: Optional[List[RunTask]] = None
     """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
@@ -2927,6 +2978,7 @@ class Run:
         if self.setup_duration is not None: body['setup_duration'] = self.setup_duration
         if self.start_time is not None: body['start_time'] = self.start_time
         if self.state: body['state'] = self.state.as_dict()
+        if self.status: body['status'] = self.status.as_dict()
         if self.tasks: body['tasks'] = [v.as_dict() for v in self.tasks]
         if self.trigger is not None: body['trigger'] = self.trigger.value
         if self.trigger_info: body['trigger_info'] = self.trigger_info.as_dict()
@@ -2965,6 +3017,7 @@ class Run:
                    setup_duration=d.get('setup_duration', None),
                    start_time=d.get('start_time', None),
                    state=_from_dict(d, 'state', RunState),
+                   status=_from_dict(d, 'status', RunStatus),
                    tasks=_repeated_dict(d, 'tasks', RunTask),
                    trigger=_enum(d, 'trigger', TriggerType),
                    trigger_info=_from_dict(d, 'trigger_info', TriggerInfo))
@@ -3216,6 +3269,17 @@ class RunLifeCycleState(Enum):
     WAITING_FOR_RETRY = 'WAITING_FOR_RETRY'
 
 
+class RunLifecycleStateV2State(Enum):
+    """The current state of the run."""
+
+    BLOCKED = 'BLOCKED'
+    PENDING = 'PENDING'
+    QUEUED = 'QUEUED'
+    RUNNING = 'RUNNING'
+    TERMINATED = 'TERMINATED'
+    TERMINATING = 'TERMINATING'
+
+
 @dataclass
 class RunNow:
     job_id: int
@@ -3609,6 +3673,36 @@ class RunState:
                    user_cancelled_or_timedout=d.get('user_cancelled_or_timedout', None))
 
 
+@dataclass
+class RunStatus:
+    """The current status of the run"""
+
+    queue_details: Optional[QueueDetails] = None
+    """If the run was queued, details about the reason for queuing the run."""
+
+    state: Optional[RunLifecycleStateV2State] = None
+    """The current state of the run."""
+
+    termination_details: Optional[TerminationDetails] = None
+    """If the run is in a TERMINATING or TERMINATED state, details about the reason for terminating the
+    run."""
+
+    def as_dict(self) -> dict:
+        """Serializes the RunStatus into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.queue_details: body['queue_details'] = self.queue_details.as_dict()
+        if self.state is not None: body['state'] = self.state.value
+        if self.termination_details: body['termination_details'] = self.termination_details.as_dict()
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> RunStatus:
+        """Deserializes the RunStatus from a dictionary."""
+        return cls(queue_details=_from_dict(d, 'queue_details', QueueDetails),
+                   state=_enum(d, 'state', RunLifecycleStateV2State),
+                   termination_details=_from_dict(d, 'termination_details', TerminationDetails))
+
+
 @dataclass
 class RunTask:
     """Used when outputting a child run, in GetRun or ListRuns."""
@@ -3773,7 +3867,10 @@ class RunTask:
     scheduled to run on a new cluster, this is the time the cluster creation call is issued."""
 
     state: Optional[RunState] = None
-    """The current state of the run."""
+    """Deprecated. Please use the `status` field instead."""
+
+    status: Optional[RunStatus] = None
+    """The current status of the run"""
 
     timeout_seconds: Optional[int] = None
     """An optional timeout applied to each run of this job task. A value of `0` means no timeout."""
@@ -3821,6 +3918,7 @@ class RunTask:
         if self.sql_task: body['sql_task'] = self.sql_task.as_dict()
         if self.start_time is not None: body['start_time'] = self.start_time
         if self.state: body['state'] = self.state.as_dict()
+        if self.status: body['status'] = self.status.as_dict()
         if self.task_key is not None: body['task_key'] = self.task_key
         if self.timeout_seconds is not None: body['timeout_seconds'] = self.timeout_seconds
         if self.webhook_notifications: body['webhook_notifications'] = self.webhook_notifications.as_dict()
@@ -3864,6 +3962,7 @@ class RunTask:
                    sql_task=_from_dict(d, 'sql_task', SqlTask),
                    start_time=d.get('start_time', None),
                    state=_from_dict(d, 'state', RunState),
+                   status=_from_dict(d, 'status', RunStatus),
                    task_key=d.get('task_key', None),
                    timeout_seconds=d.get('timeout_seconds', None),
                    webhook_notifications=_from_dict(d, 'webhook_notifications', WebhookNotifications))
@@ -5027,6 +5126,149 @@ class TaskNotificationSettings:
                    no_alert_for_skipped_runs=d.get('no_alert_for_skipped_runs', None))
 
 
+class TerminationCodeCode(Enum):
+    """The code indicates why the run was terminated. Additional codes might be introduced in future
+    releases. * `SUCCESS`: The run was completed successfully. * `CANCELED`: The run was canceled
+    during execution by the Databricks platform; for example, if the maximum run duration was
+    exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the
+    dependency type condition was not met, or there were no material tasks to execute. *
+    `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for
+    further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the
+    Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state
+    message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due
+    to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The
+    run failed because it issued an invalid request to start the cluster. *
+    `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of
+    concurrent active runs. Consider scheduling the runs over a larger time frame. *
+    `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the
+    workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize
+    requests have exceeded the allotted rate limit. Consider spreading the run execution over a
+    larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the
+    customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`:
+    The run was completed with task failures. For more details, refer to the state message or run
+    output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a
+    resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The
+    run failed while installing the user-requested library. Refer to the state message for further
+    details. The causes might include, but are not limited to: The provided library is invalid,
+    there are insufficient permissions to install the library, and so forth. *
+    `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs
+    set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has
+    already reached the maximum number of contexts it is configured to create. See: [Link]. *
+    `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state
+    message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid
+    configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed
+    due to a cloud provider issue. Refer to the state message for further details. *
+    `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size
+    limit.
+
+    [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now"""
+
+    CANCELED = 'CANCELED'
+    CLOUD_FAILURE = 'CLOUD_FAILURE'
+    CLUSTER_ERROR = 'CLUSTER_ERROR'
+    CLUSTER_REQUEST_LIMIT_EXCEEDED = 'CLUSTER_REQUEST_LIMIT_EXCEEDED'
+    DRIVER_ERROR = 'DRIVER_ERROR'
+    FEATURE_DISABLED = 'FEATURE_DISABLED'
+    INTERNAL_ERROR = 'INTERNAL_ERROR'
+    INVALID_CLUSTER_REQUEST = 'INVALID_CLUSTER_REQUEST'
+    INVALID_RUN_CONFIGURATION = 'INVALID_RUN_CONFIGURATION'
+    LIBRARY_INSTALLATION_ERROR = 'LIBRARY_INSTALLATION_ERROR'
+    MAX_CONCURRENT_RUNS_EXCEEDED = 'MAX_CONCURRENT_RUNS_EXCEEDED'
+    MAX_JOB_QUEUE_SIZE_EXCEEDED = 'MAX_JOB_QUEUE_SIZE_EXCEEDED'
+    MAX_SPARK_CONTEXTS_EXCEEDED = 'MAX_SPARK_CONTEXTS_EXCEEDED'
+    REPOSITORY_CHECKOUT_FAILED = 'REPOSITORY_CHECKOUT_FAILED'
+    RESOURCE_NOT_FOUND = 'RESOURCE_NOT_FOUND'
+    RUN_EXECUTION_ERROR = 'RUN_EXECUTION_ERROR'
+    SKIPPED = 'SKIPPED'
+    STORAGE_ACCESS_ERROR = 'STORAGE_ACCESS_ERROR'
+    SUCCESS = 'SUCCESS'
+    UNAUTHORIZED_ERROR = 'UNAUTHORIZED_ERROR'
+    WORKSPACE_RUN_LIMIT_EXCEEDED = 'WORKSPACE_RUN_LIMIT_EXCEEDED'
+
+
+@dataclass
+class TerminationDetails:
+    code: Optional[TerminationCodeCode] = None
+    """The code indicates why the run was terminated. Additional codes might be introduced in future
+    releases. * `SUCCESS`: The run was completed successfully. * `CANCELED`: The run was canceled
+    during execution by the Databricks platform; for example, if the maximum run duration was
+    exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the
+    dependency type condition was not met, or there were no material tasks to execute. *
+    `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for
+    further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the
+    Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state
+    message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due
+    to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The
+    run failed because it issued an invalid request to start the cluster. *
+    `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of
+    concurrent active runs. Consider scheduling the runs over a larger time frame. *
+    `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the
+    workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize
+    requests have exceeded the allotted rate limit. Consider spreading the run execution over a
+    larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the
+    customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`:
+    The run was completed with task failures. For more details, refer to the state message or run
+    output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a
+    resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The
+    run failed while installing the user-requested library. Refer to the state message for further
+    details. The causes might include, but are not limited to: The provided library is invalid,
+    there are insufficient permissions to install the library, and so forth. *
+    `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs
+    set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has
+    already reached the maximum number of contexts it is configured to create. See: [Link]. *
+    `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state
+    message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid
+    configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed
+    due to a cloud provider issue. Refer to the state message for further details. *
+    `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size
+    limit.
+
+    [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now"""
+
+    message: Optional[str] = None
+    """A descriptive message with the termination details. This field is unstructured and the format
+    might change."""
+
+    type: Optional[TerminationTypeType] = None
+    """* `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An error occurred in the
+    Databricks platform. Please look at the [status page] or contact support if the issue persists.
+    * `CLIENT_ERROR`: The run was terminated because of an error caused by user input or the job
+    configuration. * `CLOUD_FAILURE`: The run was terminated because of an issue with your cloud
+    provider.
+
+    [status page]: https://status.databricks.com/"""
+
+    def as_dict(self) -> dict:
+        """Serializes the TerminationDetails into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.code is not None: body['code'] = self.code.value
+        if self.message is not None: body['message'] = self.message
+        if self.type is not None: body['type'] = self.type.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> TerminationDetails:
+        """Deserializes the TerminationDetails from a dictionary."""
+        return cls(code=_enum(d, 'code', TerminationCodeCode),
+                   message=d.get('message', None),
+                   type=_enum(d, 'type', TerminationTypeType))
+
+
+class TerminationTypeType(Enum):
+    """* `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An error occurred in the
+    Databricks platform. Please look at the [status page] or contact support if the issue persists.
+    * `CLIENT_ERROR`: The run was terminated because of an error caused by user input or the job
+    configuration. * `CLOUD_FAILURE`: The run was terminated because of an issue with your cloud
+    provider.
+
+    [status page]: https://status.databricks.com/"""
+
+    CLIENT_ERROR = 'CLIENT_ERROR'
+    CLOUD_FAILURE = 'CLOUD_FAILURE'
+    INTERNAL_ERROR = 'INTERNAL_ERROR'
+    SUCCESS = 'SUCCESS'
+
+
 @dataclass
 class TriggerInfo:
     """Additional details about what triggered the run"""
@@ -4143,10 +4143,16 @@ class ExperimentsAPI:
         """Get all artifacts.
 
         List artifacts for a run. Takes an optional `artifact_path` prefix. If it is specified, the response
-        contains only artifacts with the specified prefix.",
+        contains only artifacts with the specified prefix. This API does not support pagination when listing
+        artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. Please call
+        `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which supports
+        pagination. See [List directory contents | Files API](/api/workspace/files/listdirectorycontents).
 
         :param page_token: str (optional)
-          Token indicating the page of artifact results to fetch
+          Token indicating the page of artifact results to fetch. `page_token` is not supported when listing
+          artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. Please call
+          `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which supports
+          pagination. See [List directory contents | Files API](/api/workspace/files/listdirectorycontents).
        :param path: str (optional)
          Filter artifacts matching this path (a relative path from the root artifact directory).
        :param run_id: str (optional)
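For artifacts stored in UC Volumes, the updated docstring points callers at the Files API directory-listing endpoint instead of `list_artifacts` pagination. A hedged sketch, assuming the SDK's `w.files.list_directory_contents` wrapper for that endpoint and an illustrative Volumes path:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Illustrative UC Volumes path holding MLflow artifacts; replace with your own.
volume_path = "/Volumes/main/default/mlflow_artifacts/<run_id>"
for entry in w.files.list_directory_contents(volume_path):
    print(entry.path, entry.file_size)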
@@ -1629,14 +1629,6 @@ class ServedModelInput:
     model_version: str
     """The version of the model in Databricks Model Registry or Unity Catalog to be served."""
 
-    workload_size: ServedModelInputWorkloadSize
-    """The workload size of the served model. The workload size corresponds to a range of provisioned
-    concurrency that the compute will autoscale between. A single unit of provisioned concurrency
-    can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned
-    concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned
-    concurrency). If scale-to-zero is enabled, the lower bound of the provisioned concurrency for
-    each workload size will be 0."""
-
     scale_to_zero_enabled: bool
     """Whether the compute resources for the served model should scale down to zero."""
 
@@ -1649,11 +1641,25 @@ class ServedModelInput:
     instance_profile_arn: Optional[str] = None
     """ARN of the instance profile that the served model will use to access AWS resources."""
 
+    max_provisioned_throughput: Optional[int] = None
+    """The maximum tokens per second that the endpoint can scale up to."""
+
+    min_provisioned_throughput: Optional[int] = None
+    """The minimum tokens per second that the endpoint can scale down to."""
+
     name: Optional[str] = None
     """The name of a served model. It must be unique across an endpoint. If not specified, this field
     will default to <model-name>-<model-version>. A served model name can consist of alphanumeric
     characters, dashes, and underscores."""
 
+    workload_size: Optional[ServedModelInputWorkloadSize] = None
+    """The workload size of the served model. The workload size corresponds to a range of provisioned
+    concurrency that the compute will autoscale between. A single unit of provisioned concurrency
+    can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned
+    concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned
+    concurrency). If scale-to-zero is enabled, the lower bound of the provisioned concurrency for
+    each workload size will be 0."""
+
     workload_type: Optional[ServedModelInputWorkloadType] = None
     """The workload type of the served model. The workload type selects which type of compute to use in
     the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU
@@ -1667,6 +1673,10 @@ class ServedModelInput:
         body = {}
         if self.environment_vars: body['environment_vars'] = self.environment_vars
         if self.instance_profile_arn is not None: body['instance_profile_arn'] = self.instance_profile_arn
+        if self.max_provisioned_throughput is not None:
+            body['max_provisioned_throughput'] = self.max_provisioned_throughput
+        if self.min_provisioned_throughput is not None:
+            body['min_provisioned_throughput'] = self.min_provisioned_throughput
         if self.model_name is not None: body['model_name'] = self.model_name
         if self.model_version is not None: body['model_version'] = self.model_version
         if self.name is not None: body['name'] = self.name
@@ -1680,6 +1690,8 @@ class ServedModelInput:
         """Deserializes the ServedModelInput from a dictionary."""
         return cls(environment_vars=d.get('environment_vars', None),
                    instance_profile_arn=d.get('instance_profile_arn', None),
+                   max_provisioned_throughput=d.get('max_provisioned_throughput', None),
+                   min_provisioned_throughput=d.get('min_provisioned_throughput', None),
                    model_name=d.get('model_name', None),
                    model_version=d.get('model_version', None),
                    name=d.get('name', None),
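`workload_size` moving to an optional field pairs with the new `min_provisioned_throughput` / `max_provisioned_throughput` knobs: a served model can now be sized by tokens-per-second bounds instead of a workload size. A sketch of constructing such a config (the model name, version and throughput numbers are illustrative, not from this diff):

from databricks.sdk.service.serving import ServedModelInput

served_model = ServedModelInput(
    model_name="main.default.my_model",  # illustrative UC model name
    model_version="1",
    scale_to_zero_enabled=False,
    min_provisioned_throughput=720,      # illustrative tokens/second bounds
    max_provisioned_throughput=1440)     # workload_size is now optional and omitted here
print(served_model.as_dict())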
@@ -231,6 +231,11 @@ class DeleteIndexResponse:
 
 @dataclass
 class DeltaSyncVectorIndexSpecRequest:
+    columns_to_sync: Optional[List[str]] = None
+    """[Optional] Select the columns to sync with the vector index. If you leave this field blank, all
+    columns from the source table are synced with the index. The primary key column and embedding
+    source column or embedding vector column are always synced."""
+
     embedding_source_columns: Optional[List[EmbeddingSourceColumn]] = None
     """The columns that contain the embedding source."""
 
@@ -256,6 +261,7 @@ class DeltaSyncVectorIndexSpecRequest:
     def as_dict(self) -> dict:
         """Serializes the DeltaSyncVectorIndexSpecRequest into a dictionary suitable for use as a JSON request body."""
         body = {}
+        if self.columns_to_sync: body['columns_to_sync'] = [v for v in self.columns_to_sync]
         if self.embedding_source_columns:
             body['embedding_source_columns'] = [v.as_dict() for v in self.embedding_source_columns]
         if self.embedding_vector_columns:
@@ -269,7 +275,8 @@ class DeltaSyncVectorIndexSpecRequest:
     @classmethod
     def from_dict(cls, d: Dict[str, any]) -> DeltaSyncVectorIndexSpecRequest:
         """Deserializes the DeltaSyncVectorIndexSpecRequest from a dictionary."""
-        return cls(embedding_source_columns=_repeated_dict(d, 'embedding_source_columns',
+        return cls(columns_to_sync=d.get('columns_to_sync', None),
+                   embedding_source_columns=_repeated_dict(d, 'embedding_source_columns',
                                                            EmbeddingSourceColumn),
                    embedding_vector_columns=_repeated_dict(d, 'embedding_vector_columns',
                                                            EmbeddingVectorColumn),
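A sketch of the new `columns_to_sync` field on a delta-sync index spec; the table/column names and the `EmbeddingSourceColumn` field names used here are illustrative assumptions, not taken from this diff:

from databricks.sdk.service.vectorsearch import (DeltaSyncVectorIndexSpecRequest,
                                                 EmbeddingSourceColumn)

spec = DeltaSyncVectorIndexSpecRequest(
    columns_to_sync=["id", "text"],  # only these (plus key/embedding columns) are synced
    embedding_source_columns=[
        EmbeddingSourceColumn(name="text",
                              embedding_model_endpoint_name="databricks-bge-large-en")
    ])
print(spec.as_dict())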
databricks/sdk/version.py CHANGED
@@ -1 +1 @@
-__version__ = '0.31.0'
+__version__ = '0.32.0'
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: databricks-sdk
-Version: 0.31.0
+Version: 0.32.0
 Summary: Databricks SDK for Python (Beta)
 Home-page: https://databricks-sdk-py.readthedocs.io
 Author: Serge Smertin
@@ -5,8 +5,8 @@ databricks/sdk/azure.py,sha256=8P7nEdun0hbQCap9Ojo7yZse_JHxnhYsE6ApojnPz7Q,1009
 databricks/sdk/casing.py,sha256=NKYPrfPbQjM7lU4hhNQK3z1jb_VEA29BfH4FEdby2tg,1137
 databricks/sdk/clock.py,sha256=Ivlow0r_TkXcTJ8UXkxSA0czKrY0GvwHAeOvjPkJnAQ,1360
 databricks/sdk/config.py,sha256=FWEiIY34C_4Mmv8B9w284BR2FHug2T2ySpmtyg51ttA,21139
-databricks/sdk/core.py,sha256=PWU2kTHXOF6x7i9_yRUFGj-iusr_Mo7awROiBpA9nJQ,20398
-databricks/sdk/credentials_provider.py,sha256=V8QxVUvZmOfVjEpogNEzu5nUBXzRLevWfi-NiPaDOks,29232
+databricks/sdk/core.py,sha256=7zjTSCUNrI1R4WFvQ-243tKyuDhBPQdg1rrJB-3IU04,15636
+databricks/sdk/credentials_provider.py,sha256=1y5Very59gwWIJkL3183PwPaYtiYJDAycn1hp2JZVx4,29282
 databricks/sdk/data_plane.py,sha256=Er2z2fT-KVupJKzGozGGZ-jCQ3AmDWq-DZppahIK6tU,2591
 databricks/sdk/dbutils.py,sha256=HFCuB-el6SFKhF8qRfJxYANtyLTm-VG9GtQuQgZXFkM,15741
 databricks/sdk/environments.py,sha256=5KoVuVfF-ZX17rua1sH3EJCCtniVrREXBXsMNDEV-UU,4293
@@ -14,17 +14,20 @@ databricks/sdk/oauth.py,sha256=KzcJPYLL3JL6RDvf_Q8SDAaF9xSaoYNCRD4rYInZDuo,18319
 databricks/sdk/py.typed,sha256=pSvaHpbY1UPNEXyVFUjlgBhjPFZMmVC_UNrPC7eMOHI,74
 databricks/sdk/retries.py,sha256=WgLh12bwdBc6fCQlaig3kKu18cVhPzFDGsspvq629Ew,2454
 databricks/sdk/useragent.py,sha256=I2-VnJSE6cg9QV4GXkoQSkHsEB3bDvRGgkawbBNl4G0,5540
-databricks/sdk/version.py,sha256=aHhOnzInPOGVCJ67D-A5ijrJDM_PjhpzFe3OpLz4xig,23
+databricks/sdk/version.py,sha256=Mhyb-htYwxK6GrdhYyT56CJeGp4A5jZLmueZGXkszEY,23
 databricks/sdk/_widgets/__init__.py,sha256=Qm3JB8LmdPgEn_-VgxKkodTO4gn6OdaDPwsYcDmeIRI,2667
 databricks/sdk/_widgets/default_widgets_utils.py,sha256=Rk59AFzVYVpOektB_yC_7j-vSt5OdtZA85IlG0kw0xA,1202
 databricks/sdk/_widgets/ipywidgets_utils.py,sha256=P-AyGeahPiX3S59mxpAMgffi4gyJ0irEOY7Ekkn9nQ0,2850
-databricks/sdk/errors/__init__.py,sha256=3l_wHB0S9Y6mDc_c5mUHb-TndDQxa-tdPeWmTbnBNAo,176
-databricks/sdk/errors/base.py,sha256=oawBxpuoyImsLu29ntpAgOc6RQ7kD-UcuFFER9jB3iI,3880
-databricks/sdk/errors/mapper.py,sha256=sK4aoloV-F8h1J4YHFrcNVAUBLLQQFti-ceXVmm6HpU,1386
+databricks/sdk/errors/__init__.py,sha256=S_xkqeqBtYrW-QeFL007woajSA07TLwDKCgD0h3zxAI,211
+databricks/sdk/errors/base.py,sha256=eSOKUZ5t8e_S6OFrsEyzx-vraQ0PYemsP98H9Md53M4,4893
+databricks/sdk/errors/mapper.py,sha256=G52KAcRfDFUOjgS-gvh8_X_3FXqN1P5Mmgi6F0VAb5k,1162
 databricks/sdk/errors/overrides.py,sha256=u1fZ1X2gPRv_zf1u_4EqVzbWHiFsPzm_X0sMNOCMwAE,1649
+databricks/sdk/errors/parser.py,sha256=_R8Gd5IMlGQbwsVxDKjTchA46YKAulIENAQm_HjIwyU,6085
 databricks/sdk/errors/platform.py,sha256=0EwGUTcmoobAK41KsFAnRkT6AlOY_umzr4jWEgd-6hY,3113
-databricks/sdk/errors/private_link.py,sha256=6wVRJQqousGQC7qfT0pV8LqujqfR3XLbSix_XjqVC8s,2304
+databricks/sdk/errors/private_link.py,sha256=-cDxHSm7MBpdaEFgDGvbrW4dxCRVQwSunGhwe5Ay80g,2314
 databricks/sdk/errors/sdk.py,sha256=_euMruhvquB0v_SKtgqxJUiyXHWuTb4Jl7ji6_h0E_A,109
+databricks/sdk/logger/__init__.py,sha256=0_sSQfDkaFGqMHZUVw-g_Ax-RFmOv0Z6NjxCVAeUSO0,41
+databricks/sdk/logger/round_trip_logger.py,sha256=c33VEVdy_tifQ62lF-2U39nvaRfGaahFqULb0Z7g3mk,4746
 databricks/sdk/mixins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 databricks/sdk/mixins/compute.py,sha256=khb00BzBckc4RLUF4-GnNMCSO5lXKt_XYMM3IhiUxlA,11237
 databricks/sdk/mixins/files.py,sha256=bLGFu1kVIQECTmuc_9jUf-n_Cth4COBMbmKqAYxkEkM,20542
@@ -35,26 +38,26 @@ databricks/sdk/service/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 databricks/sdk/service/_internal.py,sha256=nWbJfW5eJCQgAZ3TmA26xoWb6SNZ5N76ZA8bO1N4AsU,1961
 databricks/sdk/service/apps.py,sha256=536HvScC96edq9EXMUcyVh1h2jE5zCeCMa_l7HZiu20,38170
 databricks/sdk/service/billing.py,sha256=Ru6GumI-M4_X71HTMj2VSVBQ7tRMTrwKzhdwNyiC3fA,69733
-databricks/sdk/service/catalog.py,sha256=pJ3LWcHaljhfXKAJoHnbJpaiSOUpGymsWLHfSYaDOdo,430741
+databricks/sdk/service/catalog.py,sha256=CUm4OCGgvTUNDLXKURsp-Fj5Cx7A-iKTNnQ2dcGSsZ0,434746
 databricks/sdk/service/compute.py,sha256=u8iVnqGFS7WGrLYQ0CrhwQEXs0WkjoPbHsHeVA6-h6g,433402
 databricks/sdk/service/dashboards.py,sha256=-nrRkNLBDJU4w0OAy-EjZkI8I5ktYqX475YTAW65u9I,76081
 databricks/sdk/service/files.py,sha256=VCt83YSI9rhQexmxaQdrUXHq2UCYfZcDMLvJx5X6n1M,38162
-databricks/sdk/service/iam.py,sha256=fj1RQtCdg8E8oUt1SEnm6PzMR6UB-jaCn8M354KiB-o,148500
-databricks/sdk/service/jobs.py,sha256=c9m2GoNfMBCGSfr82MZd6zf2aDIza6Ip0jaaW8E_3hQ,318698
+databricks/sdk/service/iam.py,sha256=P_2k7_MDV-Iw4heUD78i3XQriSoYZX1Jhhfnn4gS4Zk,148548
+databricks/sdk/service/jobs.py,sha256=vcwp6ZaMJ1xo5HPkGg63k-RYVulQmA4zaXcEbJcpGwI,332433
 databricks/sdk/service/marketplace.py,sha256=Fgk_8V9zbQ8QcNPUw-yZehHv8LgnDtFJUe-YixjxkYo,136405
-databricks/sdk/service/ml.py,sha256=vohBdESClI3EOpO-ZZ44W-CMz1alq5Tw4oJnWa99Z2M,236128
+databricks/sdk/service/ml.py,sha256=KG5nG9ap1IJejha2JFhX13f61C6tShO0AnHvLNDz0KE,236858
 databricks/sdk/service/oauth2.py,sha256=67pr6gUnYwO6BaGNQfjW1qvcEB3ejdNbI9Pmvqs5bSE,39928
 databricks/sdk/service/pipelines.py,sha256=tGCo1F3tW1GxB9Q63qsh2AyisJmXqYSsGkJK0OdS06Q,119378
 databricks/sdk/service/provisioning.py,sha256=DP4Df4X-p0JEUk4zAJQhjX_wxpMi673OKLXFhxl6YSE,142678
-databricks/sdk/service/serving.py,sha256=BfShf0ceupXgLccU5zp1CZyBW1260Ga73USM2T5KxXs,140008
+databricks/sdk/service/serving.py,sha256=DfgyhXi1UB88pYJnxXqzYoAZ8vqBCbr5OFitYdHOztA,140737
 databricks/sdk/service/settings.py,sha256=7PXxsrXUe7exM35O7_iUp9r78zn5oGnPbhX_sh3v1_0,193732
 databricks/sdk/service/sharing.py,sha256=kalJYd0v1SwuGhlCaq4l2ZhzNlev9OwNbCXFIOKIMXU,113253
 databricks/sdk/service/sql.py,sha256=RaXIYMDtHbhvB7gtSMyvQsqiO_E0cMz5NXeTsrqtPVk,334558
-databricks/sdk/service/vectorsearch.py,sha256=ZfiTEpTNg8nnzPuw24MeiDn8eq6PHmEWqTHS0zdDdEo,62484
+databricks/sdk/service/vectorsearch.py,sha256=a5Y4vrS_oAJJqa69XwKMANhGuZi5glS0PSXBXz1bKGU,62961
 databricks/sdk/service/workspace.py,sha256=FKLf5esRmfFstIXo7HQg6HQCzQ2svrb6ulr8yzZ7-8U,101182
-databricks_sdk-0.31.0.dist-info/LICENSE,sha256=afBgTZo-JsYqj4VOjnejBetMuHKcFR30YobDdpVFkqY,11411
-databricks_sdk-0.31.0.dist-info/METADATA,sha256=MM4NlZk11KyaPzRoCsIQwAQ6cwOviQ8e7nXSd92TjTU,37967
-databricks_sdk-0.31.0.dist-info/NOTICE,sha256=Qnc0m8JjZNTDV80y0h1aJGvsr4GqM63m1nr2VTypg6E,963
-databricks_sdk-0.31.0.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
-databricks_sdk-0.31.0.dist-info/top_level.txt,sha256=7kRdatoSgU0EUurRQJ_3F1Nv4EOSHWAr6ng25tJOJKU,11
-databricks_sdk-0.31.0.dist-info/RECORD,,
+databricks_sdk-0.32.0.dist-info/LICENSE,sha256=afBgTZo-JsYqj4VOjnejBetMuHKcFR30YobDdpVFkqY,11411
+databricks_sdk-0.32.0.dist-info/METADATA,sha256=5bGVpoXLejnZgLyT7jHHe5zK-7iW7ISiKOVyThA3Il0,37967
+databricks_sdk-0.32.0.dist-info/NOTICE,sha256=Qnc0m8JjZNTDV80y0h1aJGvsr4GqM63m1nr2VTypg6E,963
+databricks_sdk-0.32.0.dist-info/WHEEL,sha256=uCRv0ZEik_232NlR4YDw4Pv3Ajt5bKvMH13NUU7hFuI,91
+databricks_sdk-0.32.0.dist-info/top_level.txt,sha256=7kRdatoSgU0EUurRQJ_3F1Nv4EOSHWAr6ng25tJOJKU,11
+databricks_sdk-0.32.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (73.0.1)
+Generator: setuptools (74.1.1)
 Root-Is-Purelib: true
 Tag: py3-none-any
 