databricks-sdk 0.39.0__py3-none-any.whl → 0.40.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of databricks-sdk might be problematic.

@@ -711,12 +711,18 @@ class MigrateDashboardRequest:
     parent_path: Optional[str] = None
     """The workspace path of the folder to contain the migrated Lakeview dashboard."""
 
+    update_parameter_syntax: Optional[bool] = None
+    """Flag to indicate if mustache parameter syntax ({{ param }}) should be auto-updated to named
+    syntax (:param) when converting datasets in the dashboard."""
+
     def as_dict(self) -> dict:
         """Serializes the MigrateDashboardRequest into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.display_name is not None: body['display_name'] = self.display_name
         if self.parent_path is not None: body['parent_path'] = self.parent_path
         if self.source_dashboard_id is not None: body['source_dashboard_id'] = self.source_dashboard_id
+        if self.update_parameter_syntax is not None:
+            body['update_parameter_syntax'] = self.update_parameter_syntax
         return body
 
     def as_shallow_dict(self) -> dict:
@@ -725,6 +731,8 @@ class MigrateDashboardRequest:
         if self.display_name is not None: body['display_name'] = self.display_name
         if self.parent_path is not None: body['parent_path'] = self.parent_path
         if self.source_dashboard_id is not None: body['source_dashboard_id'] = self.source_dashboard_id
+        if self.update_parameter_syntax is not None:
+            body['update_parameter_syntax'] = self.update_parameter_syntax
         return body
 
     @classmethod
@@ -732,7 +740,8 @@ class MigrateDashboardRequest:
         """Deserializes the MigrateDashboardRequest from a dictionary."""
         return cls(display_name=d.get('display_name', None),
                    parent_path=d.get('parent_path', None),
-                   source_dashboard_id=d.get('source_dashboard_id', None))
+                   source_dashboard_id=d.get('source_dashboard_id', None),
+                   update_parameter_syntax=d.get('update_parameter_syntax', None))
 
 
 @dataclass
@@ -1759,7 +1768,8 @@ class LakeviewAPI:
                 source_dashboard_id: str,
                 *,
                 display_name: Optional[str] = None,
-                parent_path: Optional[str] = None) -> Dashboard:
+                parent_path: Optional[str] = None,
+                update_parameter_syntax: Optional[bool] = None) -> Dashboard:
         """Migrate dashboard.
 
         Migrates a classic SQL dashboard to Lakeview.
@@ -1770,6 +1780,9 @@ class LakeviewAPI:
           Display name for the new Lakeview dashboard.
         :param parent_path: str (optional)
           The workspace path of the folder to contain the migrated Lakeview dashboard.
+        :param update_parameter_syntax: bool (optional)
+          Flag to indicate if mustache parameter syntax ({{ param }}) should be auto-updated to named syntax
+          (:param) when converting datasets in the dashboard.
 
         :returns: :class:`Dashboard`
         """
@@ -1777,6 +1790,7 @@ class LakeviewAPI:
         if display_name is not None: body['display_name'] = display_name
         if parent_path is not None: body['parent_path'] = parent_path
         if source_dashboard_id is not None: body['source_dashboard_id'] = source_dashboard_id
+        if update_parameter_syntax is not None: body['update_parameter_syntax'] = update_parameter_syntax
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
         res = self._api.do('POST', '/api/2.0/lakeview/dashboards/migrate', body=body, headers=headers)
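
The new flag surfaces in `LakeviewAPI.migrate` as a plain keyword argument. A minimal usage sketch, assuming a `WorkspaceClient` configured from the environment; the dashboard ID, display name, and workspace path below are placeholders:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()  # credentials resolved from env/config as usual

# Migrate a classic SQL dashboard to Lakeview; the new flag asks the service
# to rewrite {{ param }} references to the named :param syntax in the
# converted datasets.
dashboard = w.lakeview.migrate(
    source_dashboard_id='abc123',              # placeholder ID
    display_name='Sales (Lakeview)',           # placeholder name
    parent_path='/Workspace/Shared/dashboards',
    update_parameter_syntax=True,              # new in 0.40.0
)
print(dashboard.dashboard_id)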
@@ -209,7 +209,8 @@ class BaseRun:
     previously failed run. This occurs when you request to re-run the job in case of failures. *
     `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
     Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
-    triggered by a table update."""
+    triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
+    restart a continuous job run."""
 
     trigger_info: Optional[TriggerInfo] = None
     """Additional details about what triggered the run"""
@@ -449,7 +450,7 @@ class CleanRoomTaskRunResultState(Enum):
 
 @dataclass
 class CleanRoomTaskRunState:
-    """Stores the run state of the clean room notebook V1 task."""
+    """Stores the run state of the clean rooms notebook task."""
 
     life_cycle_state: Optional[CleanRoomTaskRunLifeCycleState] = None
     """A value indicating the run's current lifecycle state. This field is always available in the
@@ -479,6 +480,48 @@ class CleanRoomTaskRunState:
                    result_state=_enum(d, 'result_state', CleanRoomTaskRunResultState))
 
 
+@dataclass
+class CleanRoomsNotebookTask:
+    clean_room_name: str
+    """The clean room that the notebook belongs to."""
+
+    notebook_name: str
+    """Name of the notebook being run."""
+
+    etag: Optional[str] = None
+    """Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the
+    latest version). It can be fetched by calling the :method:cleanroomassets/get API."""
+
+    notebook_base_parameters: Optional[Dict[str, str]] = None
+    """Base parameters to be used for the clean room notebook job."""
+
+    def as_dict(self) -> dict:
+        """Serializes the CleanRoomsNotebookTask into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.clean_room_name is not None: body['clean_room_name'] = self.clean_room_name
+        if self.etag is not None: body['etag'] = self.etag
+        if self.notebook_base_parameters: body['notebook_base_parameters'] = self.notebook_base_parameters
+        if self.notebook_name is not None: body['notebook_name'] = self.notebook_name
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the CleanRoomsNotebookTask into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.clean_room_name is not None: body['clean_room_name'] = self.clean_room_name
+        if self.etag is not None: body['etag'] = self.etag
+        if self.notebook_base_parameters: body['notebook_base_parameters'] = self.notebook_base_parameters
+        if self.notebook_name is not None: body['notebook_name'] = self.notebook_name
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> CleanRoomsNotebookTask:
+        """Deserializes the CleanRoomsNotebookTask from a dictionary."""
+        return cls(clean_room_name=d.get('clean_room_name', None),
+                   etag=d.get('etag', None),
+                   notebook_base_parameters=d.get('notebook_base_parameters', None),
+                   notebook_name=d.get('notebook_name', None))
+
+
 @dataclass
 class ClusterInstance:
     cluster_id: Optional[str] = None
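
The new `CleanRoomsNotebookTask` dataclass follows the same serialization pattern as the rest of the module. A small round-trip sketch, with placeholder clean room and notebook names:

from databricks.sdk.service.jobs import CleanRoomsNotebookTask

task = CleanRoomsNotebookTask(
    clean_room_name='my_clean_room',   # placeholder
    notebook_name='shared_analysis',   # placeholder
    notebook_base_parameters={'run_date': '2024-12-01'},
)

body = task.as_dict()  # dict suitable for a JSON request body
restored = CleanRoomsNotebookTask.from_dict(body)
assert restored.notebook_name == 'shared_analysis'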
@@ -2526,11 +2569,11 @@ class JobsHealthMetric(Enum):
 
     * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * `STREAMING_BACKLOG_BYTES`:
     An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric
-    is in Private Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
-    across all streams. This metric is in Private Preview. * `STREAMING_BACKLOG_SECONDS`: An
-    estimate of the maximum consumer delay across all streams. This metric is in Private Preview. *
+    is in Public Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
+    across all streams. This metric is in Public Preview. * `STREAMING_BACKLOG_SECONDS`: An estimate
+    of the maximum consumer delay across all streams. This metric is in Public Preview. *
     `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all
-    streams. This metric is in Private Preview."""
+    streams. This metric is in Public Preview."""
 
     RUN_DURATION_SECONDS = 'RUN_DURATION_SECONDS'
     STREAMING_BACKLOG_BYTES = 'STREAMING_BACKLOG_BYTES'
@@ -2552,11 +2595,11 @@ class JobsHealthRule:
 
     * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * `STREAMING_BACKLOG_BYTES`:
     An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric
-    is in Private Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
-    across all streams. This metric is in Private Preview. * `STREAMING_BACKLOG_SECONDS`: An
-    estimate of the maximum consumer delay across all streams. This metric is in Private Preview. *
+    is in Public Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
+    across all streams. This metric is in Public Preview. * `STREAMING_BACKLOG_SECONDS`: An estimate
+    of the maximum consumer delay across all streams. This metric is in Public Preview. *
     `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all
-    streams. This metric is in Private Preview."""
+    streams. This metric is in Public Preview."""
 
     op: JobsHealthOperator
     """Specifies the operator used to compare the health metric value with the specified threshold."""
@@ -3711,7 +3754,8 @@ class Run:
     previously failed run. This occurs when you request to re-run the job in case of failures. *
     `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
     Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
-    triggered by a table update."""
+    triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
+    restart a continuous job run."""
 
     trigger_info: Optional[TriggerInfo] = None
     """Additional details about what triggered the run"""
@@ -4653,6 +4697,11 @@ class RunTask:
     original attempt’s ID and an incrementing `attempt_number`. Runs are retried only until they
     succeed, and the maximum `attempt_number` is the same as the `max_retries` value for the job."""
 
+    clean_rooms_notebook_task: Optional[CleanRoomsNotebookTask] = None
+    """The task runs a [clean rooms] notebook when the `clean_rooms_notebook_task` field is present.
+
+    [clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html"""
+
     cleanup_duration: Optional[int] = None
     """The time in milliseconds it took to terminate the cluster and clean up any associated artifacts.
     The duration of a task run is the sum of the `setup_duration`, `execution_duration`, and the
@@ -4820,6 +4869,8 @@ class RunTask:
         """Serializes the RunTask into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.attempt_number is not None: body['attempt_number'] = self.attempt_number
+        if self.clean_rooms_notebook_task:
+            body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task.as_dict()
         if self.cleanup_duration is not None: body['cleanup_duration'] = self.cleanup_duration
         if self.cluster_instance: body['cluster_instance'] = self.cluster_instance.as_dict()
         if self.condition_task: body['condition_task'] = self.condition_task.as_dict()
@@ -4864,6 +4915,7 @@ class RunTask:
         """Serializes the RunTask into a shallow dictionary of its immediate attributes."""
         body = {}
         if self.attempt_number is not None: body['attempt_number'] = self.attempt_number
+        if self.clean_rooms_notebook_task: body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task
         if self.cleanup_duration is not None: body['cleanup_duration'] = self.cleanup_duration
         if self.cluster_instance: body['cluster_instance'] = self.cluster_instance
         if self.condition_task: body['condition_task'] = self.condition_task
@@ -4908,6 +4960,8 @@ class RunTask:
     def from_dict(cls, d: Dict[str, any]) -> RunTask:
         """Deserializes the RunTask from a dictionary."""
         return cls(attempt_number=d.get('attempt_number', None),
+                   clean_rooms_notebook_task=_from_dict(d, 'clean_rooms_notebook_task',
+                                                        CleanRoomsNotebookTask),
                    cleanup_duration=d.get('cleanup_duration', None),
                    cluster_instance=_from_dict(d, 'cluster_instance', ClusterInstance),
                    condition_task=_from_dict(d, 'condition_task', RunConditionTask),
@@ -5753,6 +5807,11 @@ class SubmitTask:
     field is required and must be unique within its parent job. On Update or Reset, this field is
     used to reference the tasks to be updated or reset."""
 
+    clean_rooms_notebook_task: Optional[CleanRoomsNotebookTask] = None
+    """The task runs a [clean rooms] notebook when the `clean_rooms_notebook_task` field is present.
+
+    [clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html"""
+
     condition_task: Optional[ConditionTask] = None
     """The task evaluates a condition that can be used to control the execution of other tasks when the
     `condition_task` field is present. The condition task does not require a cluster to execute and
@@ -5857,6 +5916,8 @@ class SubmitTask:
     def as_dict(self) -> dict:
         """Serializes the SubmitTask into a dictionary suitable for use as a JSON request body."""
         body = {}
+        if self.clean_rooms_notebook_task:
+            body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task.as_dict()
         if self.condition_task: body['condition_task'] = self.condition_task.as_dict()
         if self.dbt_task: body['dbt_task'] = self.dbt_task.as_dict()
         if self.depends_on: body['depends_on'] = [v.as_dict() for v in self.depends_on]
@@ -5886,6 +5947,7 @@ class SubmitTask:
     def as_shallow_dict(self) -> dict:
         """Serializes the SubmitTask into a shallow dictionary of its immediate attributes."""
         body = {}
+        if self.clean_rooms_notebook_task: body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task
         if self.condition_task: body['condition_task'] = self.condition_task
         if self.dbt_task: body['dbt_task'] = self.dbt_task
         if self.depends_on: body['depends_on'] = self.depends_on
@@ -5915,7 +5977,9 @@ class SubmitTask:
     @classmethod
     def from_dict(cls, d: Dict[str, any]) -> SubmitTask:
         """Deserializes the SubmitTask from a dictionary."""
-        return cls(condition_task=_from_dict(d, 'condition_task', ConditionTask),
+        return cls(clean_rooms_notebook_task=_from_dict(d, 'clean_rooms_notebook_task',
+                                                        CleanRoomsNotebookTask),
+                   condition_task=_from_dict(d, 'condition_task', ConditionTask),
                    dbt_task=_from_dict(d, 'dbt_task', DbtTask),
                    depends_on=_repeated_dict(d, 'depends_on', TaskDependency),
                    description=d.get('description', None),
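
With the field wired through `SubmitTask`, a one-time run can carry a clean rooms notebook step. A sketch, assuming a configured `WorkspaceClient`; the run name, task key, and clean room/notebook names are placeholders:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import jobs

w = WorkspaceClient()

run = w.jobs.submit(run_name='clean-room-analysis',  # placeholder
                    tasks=[
                        jobs.SubmitTask(
                            task_key='clean_room_step',
                            clean_rooms_notebook_task=jobs.CleanRoomsNotebookTask(
                                clean_room_name='my_clean_room',
                                notebook_name='shared_analysis'),
                        )
                    ]).result()  # submit() returns a waiter; .result() blocks until the run finishes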
@@ -5997,6 +6061,11 @@ class Task:
     field is required and must be unique within its parent job. On Update or Reset, this field is
     used to reference the tasks to be updated or reset."""
 
+    clean_rooms_notebook_task: Optional[CleanRoomsNotebookTask] = None
+    """The task runs a [clean rooms] notebook when the `clean_rooms_notebook_task` field is present.
+
+    [clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html"""
+
     condition_task: Optional[ConditionTask] = None
     """The task evaluates a condition that can be used to control the execution of other tasks when the
     `condition_task` field is present. The condition task does not require a cluster to execute and
@@ -6126,6 +6195,8 @@ class Task:
     def as_dict(self) -> dict:
         """Serializes the Task into a dictionary suitable for use as a JSON request body."""
         body = {}
+        if self.clean_rooms_notebook_task:
+            body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task.as_dict()
         if self.condition_task: body['condition_task'] = self.condition_task.as_dict()
         if self.dbt_task: body['dbt_task'] = self.dbt_task.as_dict()
         if self.depends_on: body['depends_on'] = [v.as_dict() for v in self.depends_on]
@@ -6162,6 +6233,7 @@ class Task:
     def as_shallow_dict(self) -> dict:
         """Serializes the Task into a shallow dictionary of its immediate attributes."""
         body = {}
+        if self.clean_rooms_notebook_task: body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task
         if self.condition_task: body['condition_task'] = self.condition_task
         if self.dbt_task: body['dbt_task'] = self.dbt_task
         if self.depends_on: body['depends_on'] = self.depends_on
@@ -6198,7 +6270,9 @@ class Task:
     @classmethod
     def from_dict(cls, d: Dict[str, any]) -> Task:
         """Deserializes the Task from a dictionary."""
-        return cls(condition_task=_from_dict(d, 'condition_task', ConditionTask),
+        return cls(clean_rooms_notebook_task=_from_dict(d, 'clean_rooms_notebook_task',
+                                                        CleanRoomsNotebookTask),
+                   condition_task=_from_dict(d, 'condition_task', ConditionTask),
                    dbt_task=_from_dict(d, 'dbt_task', DbtTask),
                    depends_on=_repeated_dict(d, 'depends_on', TaskDependency),
                    description=d.get('description', None),
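
The same field on `Task` makes the step available to persistent job definitions. A sketch mirroring the one-time submission above, again with placeholder names:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import jobs

w = WorkspaceClient()

job = w.jobs.create(name='nightly-clean-room-job',  # placeholder
                    tasks=[
                        jobs.Task(
                            task_key='clean_room_step',
                            clean_rooms_notebook_task=jobs.CleanRoomsNotebookTask(
                                clean_room_name='my_clean_room',
                                notebook_name='shared_analysis'),
                        )
                    ])
print(job.job_id)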
@@ -6610,7 +6684,8 @@ class TriggerType(Enum):
     previously failed run. This occurs when you request to re-run the job in case of failures. *
     `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
     Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
-    triggered by a table update."""
+    triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
+    restart a continuous job run."""
 
     FILE_ARRIVAL = 'FILE_ARRIVAL'
     ONE_TIME = 'ONE_TIME'
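
The new enum member can be checked on run listings like any other trigger type. A sketch, assuming a configured `WorkspaceClient` and a placeholder job ID:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.jobs import TriggerType

w = WorkspaceClient()
for run in w.jobs.list_runs(job_id=123):  # placeholder job ID
    if run.trigger == TriggerType.CONTINUOUS_RESTART:
        print(f'run {run.run_id} was a manual restart of a continuous job')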