databricks-sdk 0.50.0__py3-none-any.whl → 0.51.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of databricks-sdk might be problematic.

@@ -4499,6 +4499,10 @@ class EditInstancePool:
     min_idle_instances: Optional[int] = None
     """Minimum number of idle instances to keep in the instance pool"""

+    node_type_flexibility: Optional[NodeTypeFlexibility] = None
+    """For Fleet-pool V2, this object contains the information about the alternate node type ids to use
+    when attempting to launch a cluster if the node type id is not available."""
+
     def as_dict(self) -> dict:
         """Serializes the EditInstancePool into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -4514,6 +4518,8 @@ class EditInstancePool:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility.as_dict()
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         return body
@@ -4533,6 +4539,8 @@ class EditInstancePool:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         return body
@@ -4547,6 +4555,7 @@ class EditInstancePool:
             instance_pool_name=d.get("instance_pool_name", None),
             max_capacity=d.get("max_capacity", None),
             min_idle_instances=d.get("min_idle_instances", None),
+            node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility),
             node_type_id=d.get("node_type_id", None),
         )

@@ -4772,8 +4781,11 @@ class EnforceClusterComplianceResponse:

 @dataclass
 class Environment:
-    """The environment entity used to preserve serverless environment side panel and jobs' environment
-    for non-notebook task. In this minimal environment spec, only pip dependencies are supported."""
+    """The environment entity used to preserve serverless environment side panel, jobs' environment for
+    non-notebook task, and DLT's environment for classic and serverless pipelines. (Note: DLT uses a
+    copied version of the Environment proto below, at
+    //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In this minimal
+    environment spec, only pip dependencies are supported."""

     client: str
     """Client version used by the environment The client is the user-facing environment of the runtime.
@@ -5261,16 +5273,30 @@ class GetEvents:
     """An optional set of event types to filter on. If empty, all event types are returned."""

     limit: Optional[int] = None
-    """The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed
+    """Deprecated: use page_token in combination with page_size instead.
+
+    The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed
     value is 500."""

     offset: Optional[int] = None
-    """The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the
+    """Deprecated: use page_token in combination with page_size instead.
+
+    The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the
     results are requested in descending order, the end_time field is required."""

     order: Optional[GetEventsOrder] = None
     """The order to list events in; either "ASC" or "DESC". Defaults to "DESC"."""

+    page_size: Optional[int] = None
+    """The maximum number of events to include in a page of events. The server may further constrain
+    the maximum number of results returned in a single page. If the page_size is empty or 0, the
+    server will decide the number of results to be returned. The field has to be in the range
+    [0,500]. If the value is outside the range, the server enforces 0 or 500."""
+
+    page_token: Optional[str] = None
+    """Use next_page_token or prev_page_token returned from the previous request to list the next or
+    previous page of events respectively. If page_token is empty, the first page is returned."""
+
     start_time: Optional[int] = None
     """The start time in epoch milliseconds. If empty, returns events starting from the beginning of
     time."""
@@ -5290,6 +5316,10 @@ class GetEvents:
             body["offset"] = self.offset
         if self.order is not None:
             body["order"] = self.order.value
+        if self.page_size is not None:
+            body["page_size"] = self.page_size
+        if self.page_token is not None:
+            body["page_token"] = self.page_token
         if self.start_time is not None:
             body["start_time"] = self.start_time
         return body
@@ -5309,6 +5339,10 @@ class GetEvents:
             body["offset"] = self.offset
         if self.order is not None:
             body["order"] = self.order
+        if self.page_size is not None:
+            body["page_size"] = self.page_size
+        if self.page_token is not None:
+            body["page_token"] = self.page_token
         if self.start_time is not None:
             body["start_time"] = self.start_time
         return body
@@ -5323,6 +5357,8 @@ class GetEvents:
             limit=d.get("limit", None),
             offset=d.get("offset", None),
             order=_enum(d, "order", GetEventsOrder),
+            page_size=d.get("page_size", None),
+            page_token=d.get("page_token", None),
             start_time=d.get("start_time", None),
         )

@@ -5338,11 +5374,24 @@ class GetEventsResponse:
     events: Optional[List[ClusterEvent]] = None

     next_page: Optional[GetEvents] = None
-    """The parameters required to retrieve the next page of events. Omitted if there are no more events
+    """Deprecated: use next_page_token or prev_page_token instead.
+
+    The parameters required to retrieve the next page of events. Omitted if there are no more events
     to read."""

+    next_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the next page of results. If the value is
+    "", it means no further results for the request."""
+
+    prev_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the previous page of results. If the
+    value is "", it means no further results for the request."""
+
     total_count: Optional[int] = None
-    """The total number of events filtered by the start_time, end_time, and event_types."""
+    """Deprecated: Returns 0 when request uses page_token. Will start returning zero when request uses
+    offset/limit soon.
+
+    The total number of events filtered by the start_time, end_time, and event_types."""

     def as_dict(self) -> dict:
         """Serializes the GetEventsResponse into a dictionary suitable for use as a JSON request body."""
@@ -5351,6 +5400,10 @@ class GetEventsResponse:
             body["events"] = [v.as_dict() for v in self.events]
         if self.next_page:
             body["next_page"] = self.next_page.as_dict()
+        if self.next_page_token is not None:
+            body["next_page_token"] = self.next_page_token
+        if self.prev_page_token is not None:
+            body["prev_page_token"] = self.prev_page_token
         if self.total_count is not None:
             body["total_count"] = self.total_count
         return body
@@ -5362,6 +5415,10 @@ class GetEventsResponse:
             body["events"] = self.events
         if self.next_page:
             body["next_page"] = self.next_page
+        if self.next_page_token is not None:
+            body["next_page_token"] = self.next_page_token
+        if self.prev_page_token is not None:
+            body["prev_page_token"] = self.prev_page_token
         if self.total_count is not None:
             body["total_count"] = self.total_count
         return body
@@ -5372,6 +5429,8 @@ class GetEventsResponse:
         return cls(
             events=_repeated_dict(d, "events", ClusterEvent),
             next_page=_from_dict(d, "next_page", GetEvents),
+            next_page_token=d.get("next_page_token", None),
+            prev_page_token=d.get("prev_page_token", None),
             total_count=d.get("total_count", None),
         )

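The token pair replaces the deprecated `next_page`-based pagination. A minimal sketch of a manual pagination loop against the raw endpoint, assuming the `/api/2.1/clusters/events` path and a placeholder cluster ID; in practice the SDK's `ClustersAPI.events` iterator (shown further down) does this for you:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import GetEventsResponse

w = WorkspaceClient()

# Placeholder cluster ID; the endpoint path is an assumption, not shown in this diff.
body = {"cluster_id": "0123-456789-abcdefgh", "page_size": 100}
while True:
    raw = w.api_client.do("POST", "/api/2.1/clusters/events", body=body)
    resp = GetEventsResponse.from_dict(raw)
    for event in resp.events or []:
        print(event.timestamp, event.type)
    if not resp.next_page_token:  # "" means no further results
        break
    body["page_token"] = resp.next_page_token
```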
@@ -5438,6 +5497,10 @@ class GetInstancePool:
     min_idle_instances: Optional[int] = None
     """Minimum number of idle instances to keep in the instance pool"""

+    node_type_flexibility: Optional[NodeTypeFlexibility] = None
+    """For Fleet-pool V2, this object contains the information about the alternate node type ids to use
+    when attempting to launch a cluster if the node type id is not available."""
+
     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
     in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -5488,6 +5551,8 @@ class GetInstancePool:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility.as_dict()
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         if self.preloaded_docker_images:
@@ -5529,6 +5594,8 @@ class GetInstancePool:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         if self.preloaded_docker_images:
@@ -5559,6 +5626,7 @@ class GetInstancePool:
             instance_pool_name=d.get("instance_pool_name", None),
             max_capacity=d.get("max_capacity", None),
             min_idle_instances=d.get("min_idle_instances", None),
+            node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility),
             node_type_id=d.get("node_type_id", None),
             preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
             preloaded_spark_versions=d.get("preloaded_spark_versions", None),
@@ -6393,6 +6461,10 @@ class InstancePoolAndStats:
     min_idle_instances: Optional[int] = None
     """Minimum number of idle instances to keep in the instance pool"""

+    node_type_flexibility: Optional[NodeTypeFlexibility] = None
+    """For Fleet-pool V2, this object contains the information about the alternate node type ids to use
+    when attempting to launch a cluster if the node type id is not available."""
+
     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
     in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -6443,6 +6515,8 @@ class InstancePoolAndStats:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility.as_dict()
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         if self.preloaded_docker_images:
@@ -6484,6 +6558,8 @@ class InstancePoolAndStats:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         if self.preloaded_docker_images:
@@ -6514,6 +6590,7 @@ class InstancePoolAndStats:
             instance_pool_name=d.get("instance_pool_name", None),
             max_capacity=d.get("max_capacity", None),
             min_idle_instances=d.get("min_idle_instances", None),
+            node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility),
             node_type_id=d.get("node_type_id", None),
             preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
             preloaded_spark_versions=d.get("preloaded_spark_versions", None),
@@ -7976,6 +8053,28 @@ class NodeType:
         )


+@dataclass
+class NodeTypeFlexibility:
+    """For Fleet-V2 using classic clusters, this object contains the information about the alternate
+    node type ids to use when attempting to launch a cluster. It can be used with both the driver
+    and worker node types."""
+
+    def as_dict(self) -> dict:
+        """Serializes the NodeTypeFlexibility into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the NodeTypeFlexibility into a shallow dictionary of its immediate attributes."""
+        body = {}
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> NodeTypeFlexibility:
+        """Deserializes the NodeTypeFlexibility from a dictionary."""
+        return cls()
+
+
 @dataclass
 class PendingInstanceError:
     """Error message of a failed pending instances"""
@@ -9005,6 +9104,7 @@ class TerminationReasonCode(Enum):
     DATA_ACCESS_CONFIG_CHANGED = "DATA_ACCESS_CONFIG_CHANGED"
     DBFS_COMPONENT_UNHEALTHY = "DBFS_COMPONENT_UNHEALTHY"
     DISASTER_RECOVERY_REPLICATION = "DISASTER_RECOVERY_REPLICATION"
+    DNS_RESOLUTION_ERROR = "DNS_RESOLUTION_ERROR"
     DOCKER_CONTAINER_CREATION_EXCEPTION = "DOCKER_CONTAINER_CREATION_EXCEPTION"
     DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE"
     DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION"
@@ -9023,6 +9123,7 @@ class TerminationReasonCode(Enum):
     EXECUTION_COMPONENT_UNHEALTHY = "EXECUTION_COMPONENT_UNHEALTHY"
     EXECUTOR_POD_UNSCHEDULED = "EXECUTOR_POD_UNSCHEDULED"
     GCP_API_RATE_QUOTA_EXCEEDED = "GCP_API_RATE_QUOTA_EXCEEDED"
+    GCP_DENIED_BY_ORG_POLICY = "GCP_DENIED_BY_ORG_POLICY"
     GCP_FORBIDDEN = "GCP_FORBIDDEN"
     GCP_IAM_TIMEOUT = "GCP_IAM_TIMEOUT"
     GCP_INACCESSIBLE_KMS_KEY_FAILURE = "GCP_INACCESSIBLE_KMS_KEY_FAILURE"
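The new enum members surface as values of `TerminationReason.code` on a terminated cluster. A minimal sketch, assuming a placeholder cluster ID and the usual `termination_reason` shape on cluster details:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import TerminationReasonCode

w = WorkspaceClient()

# Placeholder cluster ID; termination_reason is only populated after termination.
info = w.clusters.get(cluster_id="0123-456789-abcdefgh")
reason = info.termination_reason
if reason and reason.code == TerminationReasonCode.DNS_RESOLUTION_ERROR:
    print("Cluster failed due to a DNS resolution error")
```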
@@ -10947,6 +11048,8 @@ class ClustersAPI:
         limit: Optional[int] = None,
         offset: Optional[int] = None,
         order: Optional[GetEventsOrder] = None,
+        page_size: Optional[int] = None,
+        page_token: Optional[str] = None,
         start_time: Optional[int] = None,
     ) -> Iterator[ClusterEvent]:
         """List cluster activity events.
@@ -10961,13 +11064,25 @@ class ClustersAPI:
         :param event_types: List[:class:`EventType`] (optional)
           An optional set of event types to filter on. If empty, all event types are returned.
         :param limit: int (optional)
+          Deprecated: use page_token in combination with page_size instead.
+
           The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed
           value is 500.
         :param offset: int (optional)
+          Deprecated: use page_token in combination with page_size instead.
+
           The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the results
           are requested in descending order, the end_time field is required.
         :param order: :class:`GetEventsOrder` (optional)
           The order to list events in; either "ASC" or "DESC". Defaults to "DESC".
+        :param page_size: int (optional)
+          The maximum number of events to include in a page of events. The server may further constrain the
+          maximum number of results returned in a single page. If the page_size is empty or 0, the server will
+          decide the number of results to be returned. The field has to be in the range [0,500]. If the value
+          is outside the range, the server enforces 0 or 500.
+        :param page_token: str (optional)
+          Use next_page_token or prev_page_token returned from the previous request to list the next or
+          previous page of events respectively. If page_token is empty, the first page is returned.
         :param start_time: int (optional)
           The start time in epoch milliseconds. If empty, returns events starting from the beginning of time.

@@ -10986,6 +11101,10 @@ class ClustersAPI:
             body["offset"] = offset
         if order is not None:
             body["order"] = order.value
+        if page_size is not None:
+            body["page_size"] = page_size
+        if page_token is not None:
+            body["page_token"] = page_token
         if start_time is not None:
             body["start_time"] = start_time
         headers = {
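Since `events` still returns an `Iterator[ClusterEvent]`, the SDK keeps paginating transparently; `page_size` only tunes how many events are fetched per request. A minimal sketch with a placeholder cluster ID:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# The iterator fetches subsequent pages automatically using page_token.
for event in w.clusters.events(cluster_id="0123-456789-abcdefgh", page_size=100):
    print(event.timestamp, event.type)
```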
@@ -12101,6 +12220,7 @@ class InstancePoolsAPI:
         idle_instance_autotermination_minutes: Optional[int] = None,
         max_capacity: Optional[int] = None,
         min_idle_instances: Optional[int] = None,
+        node_type_flexibility: Optional[NodeTypeFlexibility] = None,
     ):
         """Edit an existing instance pool.

@@ -12133,6 +12253,9 @@ class InstancePoolsAPI:
           upsize requests.
         :param min_idle_instances: int (optional)
           Minimum number of idle instances to keep in the instance pool
+        :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional)
+          For Fleet-pool V2, this object contains the information about the alternate node type ids to use
+          when attempting to launch a cluster if the node type id is not available.


        """
@@ -12149,6 +12272,8 @@ class InstancePoolsAPI:
             body["max_capacity"] = max_capacity
         if min_idle_instances is not None:
             body["min_idle_instances"] = min_idle_instances
+        if node_type_flexibility is not None:
+            body["node_type_flexibility"] = node_type_flexibility.as_dict()
         if node_type_id is not None:
             body["node_type_id"] = node_type_id
         headers = {
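A minimal sketch of opting an existing pool into Fleet-pool V2 node type flexibility. The pool and node type IDs are placeholders; `NodeTypeFlexibility` currently carries no fields, so it acts as an opt-in marker and serializes to an empty object:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import NodeTypeFlexibility

w = WorkspaceClient()

# Placeholder IDs; instance_pool_id, instance_pool_name, and node_type_id
# are the required arguments of edit().
w.instance_pools.edit(
    instance_pool_id="0123-456789-pool00",
    instance_pool_name="my-fleet-pool",
    node_type_id="m5.xlarge",
    node_type_flexibility=NodeTypeFlexibility(),  # serializes to {}
)
```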
@@ -926,7 +926,7 @@ class GenieResultMetadata:
 @dataclass
 class GenieSpace:
     space_id: str
-    """Space ID"""
+    """Genie space ID"""

     title: str
     """Title of the Genie Space"""
@@ -2172,15 +2172,14 @@ class GenieAPI:
     ) -> GenieGenerateDownloadFullQueryResultResponse:
         """Generate full query result download.

-        Initiate full SQL query result download and obtain a `download_id` to track the download progress.
-        This call initiates a new SQL execution to generate the query result. The result is stored in an
-        external link can be retrieved using the [Get Download Full Query
-        Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks strongly recommends that
-        you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. See [Execute
-        Statement](:method:statementexecution/executestatement) for more details.
+        Initiates a new SQL execution and returns a `download_id` that you can use to track the progress of
+        the download. The query result is stored in an external link and can be retrieved using the [Get
+        Download Full Query Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks
+        strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition.
+        See [Execute Statement](:method:statementexecution/executestatement) for more details.

         :param space_id: str
-          Space ID
+          Genie space ID
         :param conversation_id: str
           Conversation ID
         :param message_id: str
@@ -2208,17 +2207,15 @@ class GenieAPI:
         """Get download full query result.

         After [Generating a Full Query Result Download](:method:genie/getdownloadfullqueryresult) and
-        successfully receiving a `download_id`, use this API to Poll download progress and retrieve the SQL
-        query result external link(s) upon completion. Warning: Databricks strongly recommends that you
-        protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. When you use the
-        `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated, which can be used to download
-        the results directly from Amazon S3. As a short-lived access credential is embedded in this presigned
-        URL, you should protect the URL. Because presigned URLs are already generated with embedded temporary
-        access credentials, you must not set an Authorization header in the download requests. See [Execute
+        successfully receiving a `download_id`, use this API to poll the download progress. When the download
+        is complete, the API returns one or more external links to the query result files. Warning: Databricks
+        strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition.
+        You must not set an Authorization header in download requests. When using the `EXTERNAL_LINKS`
+        disposition, Databricks returns presigned URLs that grant temporary access to data. See [Execute
         Statement](:method:statementexecution/executestatement) for more details.

         :param space_id: str
-          Space ID
+          Genie space ID
         :param conversation_id: str
           Conversation ID
         :param message_id: str
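Taken together, the two endpoints form a generate-then-poll flow. A rough sketch; all IDs are placeholders, and both the `attachment_id` parameter and the completion check are assumptions not visible in this excerpt:

```python
import time

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

ids = dict(
    space_id="space-id",
    conversation_id="conversation-id",
    message_id="message-id",
    attachment_id="attachment-id",  # assumed parameter, not shown in this diff
)

# Kick off the SQL execution and remember the download_id.
gen = w.genie.generate_download_full_query_result(**ids)

# Poll until the external links to the result files become available.
while True:
    res = w.genie.get_download_full_query_result(download_id=gen.download_id, **ids)
    if res.statement_response:  # assumed completion signal
        break
    time.sleep(5)
```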
@@ -2422,12 +2419,12 @@ class LakeviewAPI:
     def __init__(self, api_client):
         self._api = api_client

-    def create(self, *, dashboard: Optional[Dashboard] = None) -> Dashboard:
+    def create(self, dashboard: Dashboard) -> Dashboard:
         """Create dashboard.

         Create a draft dashboard.

-        :param dashboard: :class:`Dashboard` (optional)
+        :param dashboard: :class:`Dashboard`

         :returns: :class:`Dashboard`
         """
@@ -2440,12 +2437,12 @@ class LakeviewAPI:
         res = self._api.do("POST", "/api/2.0/lakeview/dashboards", body=body, headers=headers)
         return Dashboard.from_dict(res)

-    def create_schedule(self, dashboard_id: str, *, schedule: Optional[Schedule] = None) -> Schedule:
+    def create_schedule(self, dashboard_id: str, schedule: Schedule) -> Schedule:
         """Create dashboard schedule.

         :param dashboard_id: str
           UUID identifying the dashboard to which the schedule belongs.
-        :param schedule: :class:`Schedule` (optional)
+        :param schedule: :class:`Schedule`

         :returns: :class:`Schedule`
         """
@@ -2458,16 +2455,14 @@ class LakeviewAPI:
         res = self._api.do("POST", f"/api/2.0/lakeview/dashboards/{dashboard_id}/schedules", body=body, headers=headers)
         return Schedule.from_dict(res)

-    def create_subscription(
-        self, dashboard_id: str, schedule_id: str, *, subscription: Optional[Subscription] = None
-    ) -> Subscription:
+    def create_subscription(self, dashboard_id: str, schedule_id: str, subscription: Subscription) -> Subscription:
         """Create schedule subscription.

         :param dashboard_id: str
           UUID identifying the dashboard to which the subscription belongs.
         :param schedule_id: str
           UUID identifying the schedule to which the subscription belongs.
-        :param subscription: :class:`Subscription` (optional)
+        :param subscription: :class:`Subscription`

         :returns: :class:`Subscription`
         """
@@ -2853,14 +2848,14 @@ class LakeviewAPI:

         self._api.do("DELETE", f"/api/2.0/lakeview/dashboards/{dashboard_id}/published", headers=headers)

-    def update(self, dashboard_id: str, *, dashboard: Optional[Dashboard] = None) -> Dashboard:
+    def update(self, dashboard_id: str, dashboard: Dashboard) -> Dashboard:
         """Update dashboard.

         Update a draft dashboard.

         :param dashboard_id: str
           UUID identifying the dashboard.
-        :param dashboard: :class:`Dashboard` (optional)
+        :param dashboard: :class:`Dashboard`

         :returns: :class:`Dashboard`
         """
@@ -2873,14 +2868,14 @@ class LakeviewAPI:
         res = self._api.do("PATCH", f"/api/2.0/lakeview/dashboards/{dashboard_id}", body=body, headers=headers)
         return Dashboard.from_dict(res)

-    def update_schedule(self, dashboard_id: str, schedule_id: str, *, schedule: Optional[Schedule] = None) -> Schedule:
+    def update_schedule(self, dashboard_id: str, schedule_id: str, schedule: Schedule) -> Schedule:
         """Update dashboard schedule.

         :param dashboard_id: str
           UUID identifying the dashboard to which the schedule belongs.
         :param schedule_id: str
           UUID identifying the schedule.
-        :param schedule: :class:`Schedule` (optional)
+        :param schedule: :class:`Schedule`

         :returns: :class:`Schedule`
         """