databricks-sdk 0.39.0__py3-none-any.whl → 0.41.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.

This release has been flagged as potentially problematic.

@@ -35,6 +35,11 @@ class BaseJob:
  Jobs UI in the job details page and Jobs API using `budget_policy_id` 3. Inferred default based
  on accessible budget policies of the run_as identity on job creation or modification."""
 
+ has_more: Optional[bool] = None
+ """Indicates if the job has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
+ can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2 :method:jobs/list
+ requests with `expand_tasks=true`."""
+
  job_id: Optional[int] = None
  """The canonical identifier for this job."""
 
@@ -49,6 +54,7 @@ class BaseJob:
  if self.creator_user_name is not None: body['creator_user_name'] = self.creator_user_name
  if self.effective_budget_policy_id is not None:
  body['effective_budget_policy_id'] = self.effective_budget_policy_id
+ if self.has_more is not None: body['has_more'] = self.has_more
  if self.job_id is not None: body['job_id'] = self.job_id
  if self.settings: body['settings'] = self.settings.as_dict()
  return body
@@ -60,6 +66,7 @@ class BaseJob:
  if self.creator_user_name is not None: body['creator_user_name'] = self.creator_user_name
  if self.effective_budget_policy_id is not None:
  body['effective_budget_policy_id'] = self.effective_budget_policy_id
+ if self.has_more is not None: body['has_more'] = self.has_more
  if self.job_id is not None: body['job_id'] = self.job_id
  if self.settings: body['settings'] = self.settings
  return body
@@ -70,6 +77,7 @@ class BaseJob:
  return cls(created_time=d.get('created_time', None),
  creator_user_name=d.get('creator_user_name', None),
  effective_budget_policy_id=d.get('effective_budget_policy_id', None),
+ has_more=d.get('has_more', None),
  job_id=d.get('job_id', None),
  settings=_from_dict(d, 'settings', JobSettings))
 
@@ -124,10 +132,16 @@ class BaseRun:
  Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks
  are used, `git_source` must be defined on the job."""
 
+ has_more: Optional[bool] = None
+ """Indicates if the run has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
+ can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
+ :method:jobs/listruns requests with `expand_tasks=true`."""
+
  job_clusters: Optional[List[JobCluster]] = None
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
  Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
- task settings."""
+ task settings. If more than 100 job clusters are available, you can paginate through them using
+ :method:jobs/getrun."""
 
  job_id: Optional[int] = None
  """The canonical identifier of the job that contains this run."""
@@ -198,7 +212,9 @@ class BaseRun:
 
  tasks: Optional[List[RunTask]] = None
  """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
- `JobsGetOutput` to retrieve the run resutls."""
+ `JobsGetOutput` to retrieve the run resutls. If more than 100 tasks are available, you can
+ paginate through them using :method:jobs/getrun. Use the `next_page_token` field at the object
+ root to determine if more results are available."""
 
  trigger: Optional[TriggerType] = None
  """The type of trigger that fired this run.
@@ -209,7 +225,8 @@ class BaseRun:
  previously failed run. This occurs when you request to re-run the job in case of failures. *
  `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
  Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
- triggered by a table update."""
+ triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
+ restart a continuous job run."""
 
  trigger_info: Optional[TriggerInfo] = None
  """Additional details about what triggered the run"""
@@ -226,6 +243,7 @@ class BaseRun:
  if self.end_time is not None: body['end_time'] = self.end_time
  if self.execution_duration is not None: body['execution_duration'] = self.execution_duration
  if self.git_source: body['git_source'] = self.git_source.as_dict()
+ if self.has_more is not None: body['has_more'] = self.has_more
  if self.job_clusters: body['job_clusters'] = [v.as_dict() for v in self.job_clusters]
  if self.job_id is not None: body['job_id'] = self.job_id
  if self.job_parameters: body['job_parameters'] = [v.as_dict() for v in self.job_parameters]
@@ -263,6 +281,7 @@ class BaseRun:
  if self.end_time is not None: body['end_time'] = self.end_time
  if self.execution_duration is not None: body['execution_duration'] = self.execution_duration
  if self.git_source: body['git_source'] = self.git_source
+ if self.has_more is not None: body['has_more'] = self.has_more
  if self.job_clusters: body['job_clusters'] = self.job_clusters
  if self.job_id is not None: body['job_id'] = self.job_id
  if self.job_parameters: body['job_parameters'] = self.job_parameters
@@ -300,6 +319,7 @@ class BaseRun:
  end_time=d.get('end_time', None),
  execution_duration=d.get('execution_duration', None),
  git_source=_from_dict(d, 'git_source', GitSource),
+ has_more=d.get('has_more', None),
  job_clusters=_repeated_dict(d, 'job_clusters', JobCluster),
  job_id=d.get('job_id', None),
  job_parameters=_repeated_dict(d, 'job_parameters', JobParameter),
@@ -423,6 +443,7 @@ class CleanRoomTaskRunLifeCycleState(Enum):
  PENDING = 'PENDING'
  QUEUED = 'QUEUED'
  RUNNING = 'RUNNING'
+ RUN_LIFE_CYCLE_STATE_UNSPECIFIED = 'RUN_LIFE_CYCLE_STATE_UNSPECIFIED'
  SKIPPED = 'SKIPPED'
  TERMINATED = 'TERMINATED'
  TERMINATING = 'TERMINATING'
@@ -439,6 +460,7 @@ class CleanRoomTaskRunResultState(Enum):
  EXCLUDED = 'EXCLUDED'
  FAILED = 'FAILED'
  MAXIMUM_CONCURRENT_RUNS_REACHED = 'MAXIMUM_CONCURRENT_RUNS_REACHED'
+ RUN_RESULT_STATE_UNSPECIFIED = 'RUN_RESULT_STATE_UNSPECIFIED'
  SUCCESS = 'SUCCESS'
  SUCCESS_WITH_FAILURES = 'SUCCESS_WITH_FAILURES'
  TIMEDOUT = 'TIMEDOUT'
@@ -449,7 +471,7 @@ class CleanRoomTaskRunResultState(Enum):
 
  @dataclass
  class CleanRoomTaskRunState:
- """Stores the run state of the clean room notebook V1 task."""
+ """Stores the run state of the clean rooms notebook task."""
 
  life_cycle_state: Optional[CleanRoomTaskRunLifeCycleState] = None
  """A value indicating the run's current lifecycle state. This field is always available in the
@@ -479,6 +501,84 @@ class CleanRoomTaskRunState:
  result_state=_enum(d, 'result_state', CleanRoomTaskRunResultState))
 
 
+ @dataclass
+ class CleanRoomsNotebookTask:
+ clean_room_name: str
+ """The clean room that the notebook belongs to."""
+
+ notebook_name: str
+ """Name of the notebook being run."""
+
+ etag: Optional[str] = None
+ """Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the
+ latest version). It can be fetched by calling the :method:cleanroomassets/get API."""
+
+ notebook_base_parameters: Optional[Dict[str, str]] = None
+ """Base parameters to be used for the clean room notebook job."""
+
+ def as_dict(self) -> dict:
+ """Serializes the CleanRoomsNotebookTask into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.clean_room_name is not None: body['clean_room_name'] = self.clean_room_name
+ if self.etag is not None: body['etag'] = self.etag
+ if self.notebook_base_parameters: body['notebook_base_parameters'] = self.notebook_base_parameters
+ if self.notebook_name is not None: body['notebook_name'] = self.notebook_name
+ return body
+
+ def as_shallow_dict(self) -> dict:
+ """Serializes the CleanRoomsNotebookTask into a shallow dictionary of its immediate attributes."""
+ body = {}
+ if self.clean_room_name is not None: body['clean_room_name'] = self.clean_room_name
+ if self.etag is not None: body['etag'] = self.etag
+ if self.notebook_base_parameters: body['notebook_base_parameters'] = self.notebook_base_parameters
+ if self.notebook_name is not None: body['notebook_name'] = self.notebook_name
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> CleanRoomsNotebookTask:
+ """Deserializes the CleanRoomsNotebookTask from a dictionary."""
+ return cls(clean_room_name=d.get('clean_room_name', None),
+ etag=d.get('etag', None),
+ notebook_base_parameters=d.get('notebook_base_parameters', None),
+ notebook_name=d.get('notebook_name', None))
+
+
+ @dataclass
+ class CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput:
+ clean_room_job_run_state: Optional[CleanRoomTaskRunState] = None
+ """The run state of the clean rooms notebook task."""
+
+ notebook_output: Optional[NotebookOutput] = None
+ """The notebook output for the clean room run"""
+
+ output_schema_info: Optional[OutputSchemaInfo] = None
+ """Information on how to access the output schema for the clean room run"""
+
+ def as_dict(self) -> dict:
+ """Serializes the CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.clean_room_job_run_state:
+ body['clean_room_job_run_state'] = self.clean_room_job_run_state.as_dict()
+ if self.notebook_output: body['notebook_output'] = self.notebook_output.as_dict()
+ if self.output_schema_info: body['output_schema_info'] = self.output_schema_info.as_dict()
+ return body
+
+ def as_shallow_dict(self) -> dict:
+ """Serializes the CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput into a shallow dictionary of its immediate attributes."""
+ body = {}
+ if self.clean_room_job_run_state: body['clean_room_job_run_state'] = self.clean_room_job_run_state
+ if self.notebook_output: body['notebook_output'] = self.notebook_output
+ if self.output_schema_info: body['output_schema_info'] = self.output_schema_info
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput:
+ """Deserializes the CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput from a dictionary."""
+ return cls(clean_room_job_run_state=_from_dict(d, 'clean_room_job_run_state', CleanRoomTaskRunState),
+ notebook_output=_from_dict(d, 'notebook_output', NotebookOutput),
+ output_schema_info=_from_dict(d, 'output_schema_info', OutputSchemaInfo))
+
+
  @dataclass
  class ClusterInstance:
  cluster_id: Optional[str] = None
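
The new CleanRoomsNotebookTask can be attached to a Task or SubmitTask (see the later hunks in this diff). A minimal sketch of a one-time submit, assuming a configured workspace client and an existing clean room and shared notebook; the names and parameters below are placeholders:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()

    task = jobs.SubmitTask(
        task_key='clean_room_analysis',
        clean_rooms_notebook_task=jobs.CleanRoomsNotebookTask(
            clean_room_name='my-clean-room',        # placeholder
            notebook_name='shared_analysis',        # placeholder
            notebook_base_parameters={'run_date': '2024-12-01'},
        ),
    )

    # submit() returns a waiter; result() blocks until the run finishes.
    run = w.jobs.submit(run_name='clean-room-demo', tasks=[task]).result()
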
@@ -711,7 +811,8 @@ class CreateJob:
  job_clusters: Optional[List[JobCluster]] = None
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
  Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
- task settings."""
+ task settings. If more than 100 job clusters are available, you can paginate through them using
+ :method:jobs/get."""
 
  max_concurrent_runs: Optional[int] = None
  """An optional maximum allowed number of concurrent runs of the job. Set this value if you want to
@@ -752,7 +853,9 @@ class CreateJob:
  be added to the job."""
 
  tasks: Optional[List[Task]] = None
- """A list of task specifications to be executed by this job."""
+ """A list of task specifications to be executed by this job. If more than 100 tasks are available,
+ you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the
+ object root to determine if more results are available."""
 
  timeout_seconds: Optional[int] = None
  """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
@@ -1637,9 +1740,17 @@ class Job:
  Jobs UI in the job details page and Jobs API using `budget_policy_id` 3. Inferred default based
  on accessible budget policies of the run_as identity on job creation or modification."""
 
+ has_more: Optional[bool] = None
+ """Indicates if the job has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
+ can be accessed via :method:jobs/get endpoint. It is only relevant for API 2.2 :method:jobs/list
+ requests with `expand_tasks=true`."""
+
  job_id: Optional[int] = None
  """The canonical identifier for this job."""
 
+ next_page_token: Optional[str] = None
+ """A token that can be used to list the next page of sub-resources."""
+
  run_as_user_name: Optional[str] = None
  """The email of an active workspace user or the application ID of a service principal that the job
  runs as. This value can be changed by setting the `run_as` field when creating or updating a
@@ -1660,7 +1771,9 @@ class Job:
  if self.creator_user_name is not None: body['creator_user_name'] = self.creator_user_name
  if self.effective_budget_policy_id is not None:
  body['effective_budget_policy_id'] = self.effective_budget_policy_id
+ if self.has_more is not None: body['has_more'] = self.has_more
  if self.job_id is not None: body['job_id'] = self.job_id
+ if self.next_page_token is not None: body['next_page_token'] = self.next_page_token
  if self.run_as_user_name is not None: body['run_as_user_name'] = self.run_as_user_name
  if self.settings: body['settings'] = self.settings.as_dict()
  return body
@@ -1672,7 +1785,9 @@ class Job:
  if self.creator_user_name is not None: body['creator_user_name'] = self.creator_user_name
  if self.effective_budget_policy_id is not None:
  body['effective_budget_policy_id'] = self.effective_budget_policy_id
+ if self.has_more is not None: body['has_more'] = self.has_more
  if self.job_id is not None: body['job_id'] = self.job_id
+ if self.next_page_token is not None: body['next_page_token'] = self.next_page_token
  if self.run_as_user_name is not None: body['run_as_user_name'] = self.run_as_user_name
  if self.settings: body['settings'] = self.settings
  return body
@@ -1683,7 +1798,9 @@ class Job:
  return cls(created_time=d.get('created_time', None),
  creator_user_name=d.get('creator_user_name', None),
  effective_budget_policy_id=d.get('effective_budget_policy_id', None),
+ has_more=d.get('has_more', None),
  job_id=d.get('job_id', None),
+ next_page_token=d.get('next_page_token', None),
  run_as_user_name=d.get('run_as_user_name', None),
  settings=_from_dict(d, 'settings', JobSettings))
 
@@ -2323,7 +2440,8 @@ class JobSettings:
  job_clusters: Optional[List[JobCluster]] = None
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
  Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
- task settings."""
+ task settings. If more than 100 job clusters are available, you can paginate through them using
+ :method:jobs/get."""
 
  max_concurrent_runs: Optional[int] = None
  """An optional maximum allowed number of concurrent runs of the job. Set this value if you want to
@@ -2364,7 +2482,9 @@ class JobSettings:
  be added to the job."""
 
  tasks: Optional[List[Task]] = None
- """A list of task specifications to be executed by this job."""
+ """A list of task specifications to be executed by this job. If more than 100 tasks are available,
+ you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the
+ object root to determine if more results are available."""
 
  timeout_seconds: Optional[int] = None
  """An optional timeout applied to each run of this job. A value of `0` means no timeout."""
@@ -2526,11 +2646,11 @@ class JobsHealthMetric(Enum):
 
  * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * `STREAMING_BACKLOG_BYTES`:
  An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric
- is in Private Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
- across all streams. This metric is in Private Preview. * `STREAMING_BACKLOG_SECONDS`: An
- estimate of the maximum consumer delay across all streams. This metric is in Private Preview. *
+ is in Public Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
+ across all streams. This metric is in Public Preview. * `STREAMING_BACKLOG_SECONDS`: An estimate
+ of the maximum consumer delay across all streams. This metric is in Public Preview. *
  `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all
- streams. This metric is in Private Preview."""
+ streams. This metric is in Public Preview."""
 
  RUN_DURATION_SECONDS = 'RUN_DURATION_SECONDS'
  STREAMING_BACKLOG_BYTES = 'STREAMING_BACKLOG_BYTES'
@@ -2552,11 +2672,11 @@ class JobsHealthRule:
 
  * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * `STREAMING_BACKLOG_BYTES`:
  An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric
- is in Private Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
- across all streams. This metric is in Private Preview. * `STREAMING_BACKLOG_SECONDS`: An
- estimate of the maximum consumer delay across all streams. This metric is in Private Preview. *
+ is in Public Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
+ across all streams. This metric is in Public Preview. * `STREAMING_BACKLOG_SECONDS`: An estimate
+ of the maximum consumer delay across all streams. This metric is in Public Preview. *
  `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all
- streams. This metric is in Private Preview."""
+ streams. This metric is in Public Preview."""
 
  op: JobsHealthOperator
  """Specifies the operator used to compare the health metric value with the specified threshold."""
@@ -2832,6 +2952,42 @@ class NotebookTask:
  warehouse_id=d.get('warehouse_id', None))
 
 
+ @dataclass
+ class OutputSchemaInfo:
+ """Stores the catalog name, schema name, and the output schema expiration time for the clean room
+ run."""
+
+ catalog_name: Optional[str] = None
+
+ expiration_time: Optional[int] = None
+ """The expiration time for the output schema as a Unix timestamp in milliseconds."""
+
+ schema_name: Optional[str] = None
+
+ def as_dict(self) -> dict:
+ """Serializes the OutputSchemaInfo into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.catalog_name is not None: body['catalog_name'] = self.catalog_name
+ if self.expiration_time is not None: body['expiration_time'] = self.expiration_time
+ if self.schema_name is not None: body['schema_name'] = self.schema_name
+ return body
+
+ def as_shallow_dict(self) -> dict:
+ """Serializes the OutputSchemaInfo into a shallow dictionary of its immediate attributes."""
+ body = {}
+ if self.catalog_name is not None: body['catalog_name'] = self.catalog_name
+ if self.expiration_time is not None: body['expiration_time'] = self.expiration_time
+ if self.schema_name is not None: body['schema_name'] = self.schema_name
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> OutputSchemaInfo:
+ """Deserializes the OutputSchemaInfo from a dictionary."""
+ return cls(catalog_name=d.get('catalog_name', None),
+ expiration_time=d.get('expiration_time', None),
+ schema_name=d.get('schema_name', None))
+
+
  class PauseStatus(Enum):
 
  PAUSED = 'PAUSED'
@@ -3620,13 +3776,19 @@ class Run:
  Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks
  are used, `git_source` must be defined on the job."""
 
+ has_more: Optional[bool] = None
+ """Indicates if the run has more sub-resources (`tasks`, `job_clusters`) that are not shown. They
+ can be accessed via :method:jobs/getrun endpoint. It is only relevant for API 2.2
+ :method:jobs/listruns requests with `expand_tasks=true`."""
+
  iterations: Optional[List[RunTask]] = None
  """Only populated by for-each iterations. The parent for-each task is located in tasks array."""
 
  job_clusters: Optional[List[JobCluster]] = None
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
  Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in
- task settings."""
+ task settings. If more than 100 job clusters are available, you can paginate through them using
+ :method:jobs/getrun."""
 
  job_id: Optional[int] = None
  """The canonical identifier of the job that contains this run."""
@@ -3700,7 +3862,9 @@ class Run:
 
  tasks: Optional[List[RunTask]] = None
  """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
- `JobsGetOutput` to retrieve the run resutls."""
+ `JobsGetOutput` to retrieve the run resutls. If more than 100 tasks are available, you can
+ paginate through them using :method:jobs/getrun. Use the `next_page_token` field at the object
+ root to determine if more results are available."""
 
  trigger: Optional[TriggerType] = None
  """The type of trigger that fired this run.
@@ -3711,7 +3875,8 @@ class Run:
  previously failed run. This occurs when you request to re-run the job in case of failures. *
  `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
  Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
- triggered by a table update."""
+ triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
+ restart a continuous job run."""
 
  trigger_info: Optional[TriggerInfo] = None
  """Additional details about what triggered the run"""
@@ -3728,6 +3893,7 @@ class Run:
  if self.end_time is not None: body['end_time'] = self.end_time
  if self.execution_duration is not None: body['execution_duration'] = self.execution_duration
  if self.git_source: body['git_source'] = self.git_source.as_dict()
+ if self.has_more is not None: body['has_more'] = self.has_more
  if self.iterations: body['iterations'] = [v.as_dict() for v in self.iterations]
  if self.job_clusters: body['job_clusters'] = [v.as_dict() for v in self.job_clusters]
  if self.job_id is not None: body['job_id'] = self.job_id
@@ -3767,6 +3933,7 @@ class Run:
  if self.end_time is not None: body['end_time'] = self.end_time
  if self.execution_duration is not None: body['execution_duration'] = self.execution_duration
  if self.git_source: body['git_source'] = self.git_source
+ if self.has_more is not None: body['has_more'] = self.has_more
  if self.iterations: body['iterations'] = self.iterations
  if self.job_clusters: body['job_clusters'] = self.job_clusters
  if self.job_id is not None: body['job_id'] = self.job_id
@@ -3806,6 +3973,7 @@ class Run:
  end_time=d.get('end_time', None),
  execution_duration=d.get('execution_duration', None),
  git_source=_from_dict(d, 'git_source', GitSource),
+ has_more=d.get('has_more', None),
  iterations=_repeated_dict(d, 'iterations', RunTask),
  job_clusters=_repeated_dict(d, 'job_clusters', JobCluster),
  job_id=d.get('job_id', None),
@@ -4321,6 +4489,9 @@ class RunNowResponse:
  class RunOutput:
  """Run output was retrieved successfully."""
 
+ clean_rooms_notebook_output: Optional[CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput] = None
+ """The output of a clean rooms notebook task, if available"""
+
  dbt_output: Optional[DbtOutput] = None
  """The output of a dbt task, if available."""
 
@@ -4365,6 +4536,8 @@ class RunOutput:
  def as_dict(self) -> dict:
  """Serializes the RunOutput into a dictionary suitable for use as a JSON request body."""
  body = {}
+ if self.clean_rooms_notebook_output:
+ body['clean_rooms_notebook_output'] = self.clean_rooms_notebook_output.as_dict()
  if self.dbt_output: body['dbt_output'] = self.dbt_output.as_dict()
  if self.error is not None: body['error'] = self.error
  if self.error_trace is not None: body['error_trace'] = self.error_trace
@@ -4380,6 +4553,8 @@ class RunOutput:
  def as_shallow_dict(self) -> dict:
  """Serializes the RunOutput into a shallow dictionary of its immediate attributes."""
  body = {}
+ if self.clean_rooms_notebook_output:
+ body['clean_rooms_notebook_output'] = self.clean_rooms_notebook_output
  if self.dbt_output: body['dbt_output'] = self.dbt_output
  if self.error is not None: body['error'] = self.error
  if self.error_trace is not None: body['error_trace'] = self.error_trace
@@ -4395,7 +4570,9 @@ class RunOutput:
  @classmethod
  def from_dict(cls, d: Dict[str, any]) -> RunOutput:
  """Deserializes the RunOutput from a dictionary."""
- return cls(dbt_output=_from_dict(d, 'dbt_output', DbtOutput),
+ return cls(clean_rooms_notebook_output=_from_dict(d, 'clean_rooms_notebook_output',
+ CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput),
+ dbt_output=_from_dict(d, 'dbt_output', DbtOutput),
  error=d.get('error', None),
  error_trace=d.get('error_trace', None),
  info=d.get('info', None),
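
The new field surfaces clean rooms results through the existing run-output call. A sketch, assuming `w` is a `WorkspaceClient` and `task_run_id` is the run_id of a completed clean rooms notebook task (placeholder):

    output = w.jobs.get_run_output(run_id=task_run_id)
    cro = output.clean_rooms_notebook_output
    if cro:
        # Run state of the clean rooms task and, if produced, the output schema.
        if cro.clean_room_job_run_state:
            print(cro.clean_room_job_run_state.result_state)
        if cro.output_schema_info:
            print(cro.output_schema_info.catalog_name, cro.output_schema_info.schema_name)
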
@@ -4653,6 +4830,11 @@ class RunTask:
  original attempt’s ID and an incrementing `attempt_number`. Runs are retried only until they
  succeed, and the maximum `attempt_number` is the same as the `max_retries` value for the job."""
 
+ clean_rooms_notebook_task: Optional[CleanRoomsNotebookTask] = None
+ """The task runs a [clean rooms] notebook when the `clean_rooms_notebook_task` field is present.
+
+ [clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html"""
+
  cleanup_duration: Optional[int] = None
  """The time in milliseconds it took to terminate the cluster and clean up any associated artifacts.
  The duration of a task run is the sum of the `setup_duration`, `execution_duration`, and the
@@ -4820,6 +5002,8 @@ class RunTask:
  """Serializes the RunTask into a dictionary suitable for use as a JSON request body."""
  body = {}
  if self.attempt_number is not None: body['attempt_number'] = self.attempt_number
+ if self.clean_rooms_notebook_task:
+ body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task.as_dict()
  if self.cleanup_duration is not None: body['cleanup_duration'] = self.cleanup_duration
  if self.cluster_instance: body['cluster_instance'] = self.cluster_instance.as_dict()
  if self.condition_task: body['condition_task'] = self.condition_task.as_dict()
@@ -4864,6 +5048,7 @@ class RunTask:
  """Serializes the RunTask into a shallow dictionary of its immediate attributes."""
  body = {}
  if self.attempt_number is not None: body['attempt_number'] = self.attempt_number
+ if self.clean_rooms_notebook_task: body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task
  if self.cleanup_duration is not None: body['cleanup_duration'] = self.cleanup_duration
  if self.cluster_instance: body['cluster_instance'] = self.cluster_instance
  if self.condition_task: body['condition_task'] = self.condition_task
@@ -4908,6 +5093,8 @@ class RunTask:
  def from_dict(cls, d: Dict[str, any]) -> RunTask:
  """Deserializes the RunTask from a dictionary."""
  return cls(attempt_number=d.get('attempt_number', None),
+ clean_rooms_notebook_task=_from_dict(d, 'clean_rooms_notebook_task',
+ CleanRoomsNotebookTask),
  cleanup_duration=d.get('cleanup_duration', None),
  cluster_instance=_from_dict(d, 'cluster_instance', ClusterInstance),
  condition_task=_from_dict(d, 'condition_task', RunConditionTask),
@@ -5753,6 +5940,11 @@ class SubmitTask:
  field is required and must be unique within its parent job. On Update or Reset, this field is
  used to reference the tasks to be updated or reset."""
 
+ clean_rooms_notebook_task: Optional[CleanRoomsNotebookTask] = None
+ """The task runs a [clean rooms] notebook when the `clean_rooms_notebook_task` field is present.
+
+ [clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html"""
+
  condition_task: Optional[ConditionTask] = None
  """The task evaluates a condition that can be used to control the execution of other tasks when the
  `condition_task` field is present. The condition task does not require a cluster to execute and
@@ -5857,6 +6049,8 @@ class SubmitTask:
  def as_dict(self) -> dict:
  """Serializes the SubmitTask into a dictionary suitable for use as a JSON request body."""
  body = {}
+ if self.clean_rooms_notebook_task:
+ body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task.as_dict()
  if self.condition_task: body['condition_task'] = self.condition_task.as_dict()
  if self.dbt_task: body['dbt_task'] = self.dbt_task.as_dict()
  if self.depends_on: body['depends_on'] = [v.as_dict() for v in self.depends_on]
@@ -5886,6 +6080,7 @@ class SubmitTask:
  def as_shallow_dict(self) -> dict:
  """Serializes the SubmitTask into a shallow dictionary of its immediate attributes."""
  body = {}
+ if self.clean_rooms_notebook_task: body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task
  if self.condition_task: body['condition_task'] = self.condition_task
  if self.dbt_task: body['dbt_task'] = self.dbt_task
  if self.depends_on: body['depends_on'] = self.depends_on
@@ -5915,7 +6110,9 @@ class SubmitTask:
  @classmethod
  def from_dict(cls, d: Dict[str, any]) -> SubmitTask:
  """Deserializes the SubmitTask from a dictionary."""
- return cls(condition_task=_from_dict(d, 'condition_task', ConditionTask),
+ return cls(clean_rooms_notebook_task=_from_dict(d, 'clean_rooms_notebook_task',
+ CleanRoomsNotebookTask),
+ condition_task=_from_dict(d, 'condition_task', ConditionTask),
  dbt_task=_from_dict(d, 'dbt_task', DbtTask),
  depends_on=_repeated_dict(d, 'depends_on', TaskDependency),
  description=d.get('description', None),
@@ -5997,6 +6194,11 @@ class Task:
  field is required and must be unique within its parent job. On Update or Reset, this field is
  used to reference the tasks to be updated or reset."""
 
+ clean_rooms_notebook_task: Optional[CleanRoomsNotebookTask] = None
+ """The task runs a [clean rooms] notebook when the `clean_rooms_notebook_task` field is present.
+
+ [clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html"""
+
  condition_task: Optional[ConditionTask] = None
  """The task evaluates a condition that can be used to control the execution of other tasks when the
  `condition_task` field is present. The condition task does not require a cluster to execute and
@@ -6126,6 +6328,8 @@ class Task:
  def as_dict(self) -> dict:
  """Serializes the Task into a dictionary suitable for use as a JSON request body."""
  body = {}
+ if self.clean_rooms_notebook_task:
+ body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task.as_dict()
  if self.condition_task: body['condition_task'] = self.condition_task.as_dict()
  if self.dbt_task: body['dbt_task'] = self.dbt_task.as_dict()
  if self.depends_on: body['depends_on'] = [v.as_dict() for v in self.depends_on]
@@ -6162,6 +6366,7 @@ class Task:
  def as_shallow_dict(self) -> dict:
  """Serializes the Task into a shallow dictionary of its immediate attributes."""
  body = {}
+ if self.clean_rooms_notebook_task: body['clean_rooms_notebook_task'] = self.clean_rooms_notebook_task
  if self.condition_task: body['condition_task'] = self.condition_task
  if self.dbt_task: body['dbt_task'] = self.dbt_task
  if self.depends_on: body['depends_on'] = self.depends_on
@@ -6198,7 +6403,9 @@ class Task:
  @classmethod
  def from_dict(cls, d: Dict[str, any]) -> Task:
  """Deserializes the Task from a dictionary."""
- return cls(condition_task=_from_dict(d, 'condition_task', ConditionTask),
+ return cls(clean_rooms_notebook_task=_from_dict(d, 'clean_rooms_notebook_task',
+ CleanRoomsNotebookTask),
+ condition_task=_from_dict(d, 'condition_task', ConditionTask),
  dbt_task=_from_dict(d, 'dbt_task', DbtTask),
  depends_on=_repeated_dict(d, 'depends_on', TaskDependency),
  description=d.get('description', None),
@@ -6610,7 +6817,8 @@ class TriggerType(Enum):
  previously failed run. This occurs when you request to re-run the job in case of failures. *
  `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
  Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
- triggered by a table update."""
+ triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
+ restart a continuous job run."""
 
  FILE_ARRIVAL = 'FILE_ARRIVAL'
  ONE_TIME = 'ONE_TIME'
@@ -6991,6 +7199,7 @@ class JobsAPI:
  :param job_clusters: List[:class:`JobCluster`] (optional)
  A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries
  cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
+ If more than 100 job clusters are available, you can paginate through them using :method:jobs/get.
  :param max_concurrent_runs: int (optional)
  An optional maximum allowed number of concurrent runs of the job. Set this value if you want to be
  able to execute multiple runs of the same job concurrently. This is useful for example if you
@@ -7022,7 +7231,9 @@ class JobsAPI:
  clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added
  to the job.
  :param tasks: List[:class:`Task`] (optional)
- A list of task specifications to be executed by this job.
+ A list of task specifications to be executed by this job. If more than 100 tasks are available, you
+ can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root
+ to determine if more results are available.
  :param timeout_seconds: int (optional)
  An optional timeout applied to each run of this job. A value of `0` means no timeout.
  :param trigger: :class:`TriggerSettings` (optional)
@@ -7118,19 +7329,28 @@ class JobsAPI:
  res = self._api.do('GET', '/api/2.1/jobs/runs/export', query=query, headers=headers)
  return ExportRunOutput.from_dict(res)
 
- def get(self, job_id: int) -> Job:
+ def get(self, job_id: int, *, page_token: Optional[str] = None) -> Job:
  """Get a single job.
 
  Retrieves the details for a single job.
 
+ In Jobs API 2.2, requests for a single job support pagination of `tasks` and `job_clusters` when
+ either exceeds 100 elements. Use the `next_page_token` field to check for more results and pass its
+ value as the `page_token` in subsequent requests. Arrays with fewer than 100 elements in a page will
+ be empty on later pages.
+
  :param job_id: int
  The canonical identifier of the job to retrieve information about. This field is required.
+ :param page_token: str (optional)
+ Use `next_page_token` returned from the previous GetJob to request the next page of the job's
+ sub-resources.
 
  :returns: :class:`Job`
  """
 
  query = {}
  if job_id is not None: query['job_id'] = job_id
+ if page_token is not None: query['page_token'] = page_token
  headers = {'Accept': 'application/json', }
 
  res = self._api.do('GET', '/api/2.1/jobs/get', query=query, headers=headers)
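
A sketch of the resulting pagination loop for collecting a job's full task list under API 2.2, assuming `w` is a `WorkspaceClient` and `job_id` refers to an existing job (placeholder):

    all_tasks, page_token = [], None
    while True:
        job = w.jobs.get(job_id=job_id, page_token=page_token)
        all_tasks.extend(job.settings.tasks or [])
        page_token = job.next_page_token  # falsy when there are no more pages
        if not page_token:
            break
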
@@ -7176,7 +7396,12 @@ class JobsAPI:
  page_token: Optional[str] = None) -> Run:
  """Get a single job run.
 
- Retrieve the metadata of a run.
+ Retrieves the metadata of a run.
+
+ In Jobs API 2.2, requests for a single job run support pagination of `tasks` and `job_clusters` when
+ either exceeds 100 elements. Use the `next_page_token` field to check for more results and pass its
+ value as the `page_token` in subsequent requests. Arrays with fewer than 100 elements in a page will
+ be empty on later pages.
 
  :param run_id: int
  The canonical identifier of the run for which to retrieve the metadata. This field is required.
@@ -7185,8 +7410,8 @@ class JobsAPI:
  :param include_resolved_values: bool (optional)
  Whether to include resolved parameter values in the response.
  :param page_token: str (optional)
- To list the next page of job tasks, set this field to the value of the `next_page_token` returned in
- the GetJob response.
+ Use `next_page_token` returned from the previous GetRun to request the next page of the run's
+ sub-resources.
 
  :returns: :class:`Run`
  """
@@ -7238,7 +7463,8 @@ class JobsAPI:
  Retrieves a list of jobs.
 
  :param expand_tasks: bool (optional)
- Whether to include task and cluster details in the response.
+ Whether to include task and cluster details in the response. Note that in API 2.2, only the first
+ 100 elements will be shown. Use :method:jobs/get to paginate through all tasks and clusters.
  :param limit: int (optional)
  The number of jobs to return. This value must be greater than 0 and less or equal to 100. The
  default value is 20.
@@ -7295,7 +7521,8 @@ class JobsAPI:
  If completed_only is `true`, only completed runs are included in the results; otherwise, lists both
  active and completed runs. This field cannot be `true` when active_only is `true`.
  :param expand_tasks: bool (optional)
- Whether to include task and cluster details in the response.
+ Whether to include task and cluster details in the response. Note that in API 2.2, only the first
+ 100 elements will be shown. Use :method:jobs/getrun to paginate through all tasks and clusters.
  :param job_id: int (optional)
  The job for which to list runs. If omitted, the Jobs service lists runs from all jobs.
  :param limit: int (optional)
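
Putting the list-side change together with the new pagination: when `expand_tasks=true` truncates a job's sub-resources under API 2.2, the new `has_more` flag signals that a follow-up `jobs.get` is needed. A sketch, assuming `w` is a `WorkspaceClient`:

    for base_job in w.jobs.list(expand_tasks=True):
        settings = base_job.settings
        if base_job.has_more:
            # tasks/job_clusters were truncated at 100 elements; fetch the full job.
            # (The fetched job may itself need page_token pagination, as shown earlier.)
            settings = w.jobs.get(job_id=base_job.job_id).settings
        print(base_job.job_id, len(settings.tasks or []))
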