databricks-sdk 0.49.0__py3-none-any.whl → 0.51.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -332,7 +332,7 @@ class CleanRoomAssetForeignTableLocalDetails:
 @dataclass
 class CleanRoomAssetNotebook:
     etag: Optional[str] = None
-    """Server generated checksum that represents the notebook version."""
+    """Server generated etag that represents the notebook version."""
 
     notebook_content: Optional[str] = None
     """Base 64 representation of the notebook contents. This is the same format as returned by
@@ -1097,7 +1097,7 @@ class CleanRoomAssetsAPI:
     def __init__(self, api_client):
         self._api = api_client
 
-    def create(self, clean_room_name: str, *, asset: Optional[CleanRoomAsset] = None) -> CleanRoomAsset:
+    def create(self, clean_room_name: str, asset: CleanRoomAsset) -> CleanRoomAsset:
         """Create an asset.
 
         Create a clean room asset —share an asset like a notebook or table into the clean room. For each UC
@@ -1107,7 +1107,7 @@ class CleanRoomAssetsAPI:
 
         :param clean_room_name: str
           Name of the clean room.
-        :param asset: :class:`CleanRoomAsset` (optional)
+        :param asset: :class:`CleanRoomAsset`
          Metadata of the clean room asset
 
         :returns: :class:`CleanRoomAsset`
@@ -1200,12 +1200,7 @@ class CleanRoomAssetsAPI:
                 query["page_token"] = json["next_page_token"]
 
     def update(
-        self,
-        clean_room_name: str,
-        asset_type: CleanRoomAssetAssetType,
-        name: str,
-        *,
-        asset: Optional[CleanRoomAsset] = None,
+        self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str, asset: CleanRoomAsset
     ) -> CleanRoomAsset:
         """Update an asset.
 
@@ -1224,7 +1219,7 @@ class CleanRoomAssetsAPI:
           *shared_catalog*.*shared_schema*.*asset_name*
 
           For notebooks, the name is the notebook file name.
-        :param asset: :class:`CleanRoomAsset` (optional)
+        :param asset: :class:`CleanRoomAsset`
           Metadata of the clean room asset
 
         :returns: :class:`CleanRoomAsset`
@@ -1303,7 +1298,7 @@ class CleanRoomsAPI:
     def __init__(self, api_client):
         self._api = api_client
 
-    def create(self, *, clean_room: Optional[CleanRoom] = None) -> CleanRoom:
+    def create(self, clean_room: CleanRoom) -> CleanRoom:
         """Create a clean room.
 
         Create a new clean room with the specified collaborators. This method is asynchronous; the returned
@@ -1314,7 +1309,7 @@ class CleanRoomsAPI:
 
         The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the metastore.
 
-        :param clean_room: :class:`CleanRoom` (optional)
+        :param clean_room: :class:`CleanRoom`
 
         :returns: :class:`CleanRoom`
         """
@@ -1328,7 +1323,7 @@ class CleanRoomsAPI:
         return CleanRoom.from_dict(res)
 
     def create_output_catalog(
-        self, clean_room_name: str, *, output_catalog: Optional[CleanRoomOutputCatalog] = None
+        self, clean_room_name: str, output_catalog: CleanRoomOutputCatalog
     ) -> CreateCleanRoomOutputCatalogResponse:
         """Create an output catalog.
 
@@ -1336,7 +1331,7 @@ class CleanRoomsAPI:
 
         :param clean_room_name: str
           Name of the clean room.
-        :param output_catalog: :class:`CleanRoomOutputCatalog` (optional)
+        :param output_catalog: :class:`CleanRoomOutputCatalog`
 
         :returns: :class:`CreateCleanRoomOutputCatalogResponse`
         """
@@ -4499,6 +4499,10 @@ class EditInstancePool:
     min_idle_instances: Optional[int] = None
     """Minimum number of idle instances to keep in the instance pool"""
 
+    node_type_flexibility: Optional[NodeTypeFlexibility] = None
+    """For Fleet-pool V2, this object contains the information about the alternate node type ids to use
+    when attempting to launch a cluster if the node type id is not available."""
+
     def as_dict(self) -> dict:
         """Serializes the EditInstancePool into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -4514,6 +4518,8 @@ class EditInstancePool:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility.as_dict()
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         return body
@@ -4533,6 +4539,8 @@ class EditInstancePool:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         return body
@@ -4547,6 +4555,7 @@ class EditInstancePool:
             instance_pool_name=d.get("instance_pool_name", None),
             max_capacity=d.get("max_capacity", None),
             min_idle_instances=d.get("min_idle_instances", None),
+            node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility),
             node_type_id=d.get("node_type_id", None),
         )
 
@@ -4772,8 +4781,11 @@ class EnforceClusterComplianceResponse:
 
 @dataclass
 class Environment:
-    """The environment entity used to preserve serverless environment side panel and jobs' environment
-    for non-notebook task. In this minimal environment spec, only pip dependencies are supported."""
+    """The environment entity used to preserve serverless environment side panel, jobs' environment for
+    non-notebook task, and DLT's environment for classic and serverless pipelines. (Note: DLT uses a
+    copied version of the Environment proto below, at
+    //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In this minimal
+    environment spec, only pip dependencies are supported."""
 
     client: str
     """Client version used by the environment The client is the user-facing environment of the runtime.
@@ -4788,6 +4800,10 @@ class Environment:
     Databricks), <vcs project url> E.g. dependencies: ["foo==0.0.1", "-r
     /Workspace/test/requirements.txt"]"""
 
+    jar_dependencies: Optional[List[str]] = None
+    """List of jar dependencies, should be string representing volume paths. For example:
+    `/Volumes/path/to/test.jar`."""
+
     def as_dict(self) -> dict:
         """Serializes the Environment into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -4795,6 +4811,8 @@ class Environment:
             body["client"] = self.client
         if self.dependencies:
             body["dependencies"] = [v for v in self.dependencies]
+        if self.jar_dependencies:
+            body["jar_dependencies"] = [v for v in self.jar_dependencies]
         return body
 
     def as_shallow_dict(self) -> dict:
@@ -4804,12 +4822,18 @@ class Environment:
             body["client"] = self.client
         if self.dependencies:
             body["dependencies"] = self.dependencies
+        if self.jar_dependencies:
+            body["jar_dependencies"] = self.jar_dependencies
         return body
 
     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> Environment:
         """Deserializes the Environment from a dictionary."""
-        return cls(client=d.get("client", None), dependencies=d.get("dependencies", None))
+        return cls(
+            client=d.get("client", None),
+            dependencies=d.get("dependencies", None),
+            jar_dependencies=d.get("jar_dependencies", None),
+        )
 
 
 @dataclass
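
`jar_dependencies` sits alongside the pip `dependencies` list and round-trips through the generated serializers like any other optional field. A small sketch; the client version and volume path are illustrative:

```python
from databricks.sdk.service.compute import Environment

env = Environment(
    client="1",
    dependencies=["foo==0.0.1", "-r /Workspace/test/requirements.txt"],
    jar_dependencies=["/Volumes/path/to/test.jar"],  # new in 0.51.0
)

# as_dict() only emits keys that are set, so an unset jar_dependencies
# never appears in the request body.
body = env.as_dict()
assert Environment.from_dict(body) == env  # lossless round-trip
```
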
@@ -5249,16 +5273,30 @@ class GetEvents:
     """An optional set of event types to filter on. If empty, all event types are returned."""
 
     limit: Optional[int] = None
-    """The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed
+    """Deprecated: use page_token in combination with page_size instead.
+
+    The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed
     value is 500."""
 
     offset: Optional[int] = None
-    """The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the
+    """Deprecated: use page_token in combination with page_size instead.
+
+    The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the
     results are requested in descending order, the end_time field is required."""
 
     order: Optional[GetEventsOrder] = None
     """The order to list events in; either "ASC" or "DESC". Defaults to "DESC"."""
 
+    page_size: Optional[int] = None
+    """The maximum number of events to include in a page of events. The server may further constrain
+    the maximum number of results returned in a single page. If the page_size is empty or 0, the
+    server will decide the number of results to be returned. The field has to be in the range
+    [0,500]. If the value is outside the range, the server enforces 0 or 500."""
+
+    page_token: Optional[str] = None
+    """Use next_page_token or prev_page_token returned from the previous request to list the next or
+    previous page of events respectively. If page_token is empty, the first page is returned."""
+
     start_time: Optional[int] = None
     """The start time in epoch milliseconds. If empty, returns events starting from the beginning of
     time."""
@@ -5278,6 +5316,10 @@ class GetEvents:
             body["offset"] = self.offset
         if self.order is not None:
             body["order"] = self.order.value
+        if self.page_size is not None:
+            body["page_size"] = self.page_size
+        if self.page_token is not None:
+            body["page_token"] = self.page_token
         if self.start_time is not None:
             body["start_time"] = self.start_time
         return body
@@ -5297,6 +5339,10 @@ class GetEvents:
             body["offset"] = self.offset
         if self.order is not None:
             body["order"] = self.order
+        if self.page_size is not None:
+            body["page_size"] = self.page_size
+        if self.page_token is not None:
+            body["page_token"] = self.page_token
         if self.start_time is not None:
             body["start_time"] = self.start_time
         return body
@@ -5311,6 +5357,8 @@ class GetEvents:
             limit=d.get("limit", None),
             offset=d.get("offset", None),
             order=_enum(d, "order", GetEventsOrder),
+            page_size=d.get("page_size", None),
+            page_token=d.get("page_token", None),
             start_time=d.get("start_time", None),
         )
 
@@ -5326,11 +5374,24 @@ class GetEventsResponse:
     events: Optional[List[ClusterEvent]] = None
 
     next_page: Optional[GetEvents] = None
-    """The parameters required to retrieve the next page of events. Omitted if there are no more events
+    """Deprecated: use next_page_token or prev_page_token instead.
+
+    The parameters required to retrieve the next page of events. Omitted if there are no more events
     to read."""
 
+    next_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the next page of results. If the value is
+    "", it means no further results for the request."""
+
+    prev_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the previous page of results. If the
+    value is "", it means no further results for the request."""
+
     total_count: Optional[int] = None
-    """The total number of events filtered by the start_time, end_time, and event_types."""
+    """Deprecated: Returns 0 when request uses page_token. Will start returning zero when request uses
+    offset/limit soon.
+
+    The total number of events filtered by the start_time, end_time, and event_types."""
 
     def as_dict(self) -> dict:
         """Serializes the GetEventsResponse into a dictionary suitable for use as a JSON request body."""
@@ -5339,6 +5400,10 @@ class GetEventsResponse:
             body["events"] = [v.as_dict() for v in self.events]
         if self.next_page:
             body["next_page"] = self.next_page.as_dict()
+        if self.next_page_token is not None:
+            body["next_page_token"] = self.next_page_token
+        if self.prev_page_token is not None:
+            body["prev_page_token"] = self.prev_page_token
         if self.total_count is not None:
             body["total_count"] = self.total_count
         return body
@@ -5350,6 +5415,10 @@ class GetEventsResponse:
             body["events"] = self.events
         if self.next_page:
             body["next_page"] = self.next_page
+        if self.next_page_token is not None:
+            body["next_page_token"] = self.next_page_token
+        if self.prev_page_token is not None:
+            body["prev_page_token"] = self.prev_page_token
         if self.total_count is not None:
             body["total_count"] = self.total_count
         return body
@@ -5360,6 +5429,8 @@ class GetEventsResponse:
         return cls(
             events=_repeated_dict(d, "events", ClusterEvent),
             next_page=_from_dict(d, "next_page", GetEvents),
+            next_page_token=d.get("next_page_token", None),
+            prev_page_token=d.get("prev_page_token", None),
            total_count=d.get("total_count", None),
         )
 
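For callers that drive pagination by hand, the new tokens replace the deprecated `next_page`/`total_count` mechanics: an empty string means there is nothing further in that direction. A sketch over a hypothetical raw payload:

```python
from databricks.sdk.service.compute import GetEventsResponse

# Hypothetical wire response illustrating the new token fields.
raw = {
    "events": [],
    "next_page_token": "abc123",  # "" would mean no further results
    "prev_page_token": "",        # already on the first page
    "total_count": 0,             # deprecated: 0 when page_token was used
}

resp = GetEventsResponse.from_dict(raw)
if resp.next_page_token:
    ...  # issue the next request with page_token=resp.next_page_token
```
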
@@ -5426,6 +5497,10 @@ class GetInstancePool:
     min_idle_instances: Optional[int] = None
     """Minimum number of idle instances to keep in the instance pool"""
 
+    node_type_flexibility: Optional[NodeTypeFlexibility] = None
+    """For Fleet-pool V2, this object contains the information about the alternate node type ids to use
+    when attempting to launch a cluster if the node type id is not available."""
+
     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
     in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -5476,6 +5551,8 @@ class GetInstancePool:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility.as_dict()
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         if self.preloaded_docker_images:
@@ -5517,6 +5594,8 @@ class GetInstancePool:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         if self.preloaded_docker_images:
@@ -5547,6 +5626,7 @@ class GetInstancePool:
             instance_pool_name=d.get("instance_pool_name", None),
             max_capacity=d.get("max_capacity", None),
             min_idle_instances=d.get("min_idle_instances", None),
+            node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility),
             node_type_id=d.get("node_type_id", None),
             preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
             preloaded_spark_versions=d.get("preloaded_spark_versions", None),
@@ -6381,6 +6461,10 @@ class InstancePoolAndStats:
     min_idle_instances: Optional[int] = None
     """Minimum number of idle instances to keep in the instance pool"""
 
+    node_type_flexibility: Optional[NodeTypeFlexibility] = None
+    """For Fleet-pool V2, this object contains the information about the alternate node type ids to use
+    when attempting to launch a cluster if the node type id is not available."""
+
     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
     in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -6431,6 +6515,8 @@ class InstancePoolAndStats:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility.as_dict()
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         if self.preloaded_docker_images:
@@ -6472,6 +6558,8 @@ class InstancePoolAndStats:
             body["max_capacity"] = self.max_capacity
         if self.min_idle_instances is not None:
             body["min_idle_instances"] = self.min_idle_instances
+        if self.node_type_flexibility:
+            body["node_type_flexibility"] = self.node_type_flexibility
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
         if self.preloaded_docker_images:
@@ -6502,6 +6590,7 @@ class InstancePoolAndStats:
             instance_pool_name=d.get("instance_pool_name", None),
             max_capacity=d.get("max_capacity", None),
             min_idle_instances=d.get("min_idle_instances", None),
+            node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility),
             node_type_id=d.get("node_type_id", None),
             preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
             preloaded_spark_versions=d.get("preloaded_spark_versions", None),
@@ -7667,6 +7756,9 @@ class LogSyncStatus:
         return cls(last_attempted=d.get("last_attempted", None), last_exception=d.get("last_exception", None))
 
 
+MapAny = Dict[str, Any]
+
+
 @dataclass
 class MavenLibrary:
     coordinates: str
@@ -7961,6 +8053,28 @@ class NodeType:
     )
 
 
+@dataclass
+class NodeTypeFlexibility:
+    """For Fleet-V2 using classic clusters, this object contains the information about the alternate
+    node type ids to use when attempting to launch a cluster. It can be used with both the driver
+    and worker node types."""
+
+    def as_dict(self) -> dict:
+        """Serializes the NodeTypeFlexibility into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the NodeTypeFlexibility into a shallow dictionary of its immediate attributes."""
+        body = {}
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> NodeTypeFlexibility:
+        """Deserializes the NodeTypeFlexibility from a dictionary."""
+        return cls()
+
+
 @dataclass
 class PendingInstanceError:
     """Error message of a failed pending instances"""
@@ -8937,6 +9051,7 @@ class TerminationReasonCode(Enum):
     ACCESS_TOKEN_FAILURE = "ACCESS_TOKEN_FAILURE"
     ALLOCATION_TIMEOUT = "ALLOCATION_TIMEOUT"
     ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY = "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY"
+    ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS"
     ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS"
     ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS"
     ALLOCATION_TIMEOUT_NO_READY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS"
@@ -8989,7 +9104,11 @@ class TerminationReasonCode(Enum):
     DATA_ACCESS_CONFIG_CHANGED = "DATA_ACCESS_CONFIG_CHANGED"
     DBFS_COMPONENT_UNHEALTHY = "DBFS_COMPONENT_UNHEALTHY"
     DISASTER_RECOVERY_REPLICATION = "DISASTER_RECOVERY_REPLICATION"
+    DNS_RESOLUTION_ERROR = "DNS_RESOLUTION_ERROR"
+    DOCKER_CONTAINER_CREATION_EXCEPTION = "DOCKER_CONTAINER_CREATION_EXCEPTION"
     DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE"
+    DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION"
+    DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION"
     DRIVER_EVICTION = "DRIVER_EVICTION"
     DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT"
     DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE"
@@ -9004,6 +9123,7 @@ class TerminationReasonCode(Enum):
     EXECUTION_COMPONENT_UNHEALTHY = "EXECUTION_COMPONENT_UNHEALTHY"
     EXECUTOR_POD_UNSCHEDULED = "EXECUTOR_POD_UNSCHEDULED"
     GCP_API_RATE_QUOTA_EXCEEDED = "GCP_API_RATE_QUOTA_EXCEEDED"
+    GCP_DENIED_BY_ORG_POLICY = "GCP_DENIED_BY_ORG_POLICY"
     GCP_FORBIDDEN = "GCP_FORBIDDEN"
     GCP_IAM_TIMEOUT = "GCP_IAM_TIMEOUT"
     GCP_INACCESSIBLE_KMS_KEY_FAILURE = "GCP_INACCESSIBLE_KMS_KEY_FAILURE"
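
The new codes matter mostly when triaging terminated clusters. A hedged sketch of how they surface, assuming a configured `WorkspaceClient` and an illustrative cluster id:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import TerminationReasonCode

w = WorkspaceClient()
cluster = w.clusters.get(cluster_id="1234-567890-abcde123")  # illustrative id

reason = cluster.termination_reason
if reason and reason.code == TerminationReasonCode.DNS_RESOLUTION_ERROR:
    print("New in 0.51.0: DNS resolution failed while launching the cluster.")
elif reason and reason.code == TerminationReasonCode.DOCKER_INVALID_OS_EXCEPTION:
    print("New in 0.51.0: the custom Docker image targets an unsupported OS.")
```
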
@@ -10928,6 +11048,8 @@ class ClustersAPI:
         limit: Optional[int] = None,
         offset: Optional[int] = None,
         order: Optional[GetEventsOrder] = None,
+        page_size: Optional[int] = None,
+        page_token: Optional[str] = None,
         start_time: Optional[int] = None,
     ) -> Iterator[ClusterEvent]:
         """List cluster activity events.
@@ -10942,13 +11064,25 @@ class ClustersAPI:
         :param event_types: List[:class:`EventType`] (optional)
           An optional set of event types to filter on. If empty, all event types are returned.
         :param limit: int (optional)
+          Deprecated: use page_token in combination with page_size instead.
+
           The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed
           value is 500.
         :param offset: int (optional)
+          Deprecated: use page_token in combination with page_size instead.
+
          The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the results
          are requested in descending order, the end_time field is required.
         :param order: :class:`GetEventsOrder` (optional)
          The order to list events in; either "ASC" or "DESC". Defaults to "DESC".
+        :param page_size: int (optional)
+          The maximum number of events to include in a page of events. The server may further constrain the
+          maximum number of results returned in a single page. If the page_size is empty or 0, the server will
+          decide the number of results to be returned. The field has to be in the range [0,500]. If the value
+          is outside the range, the server enforces 0 or 500.
+        :param page_token: str (optional)
+          Use next_page_token or prev_page_token returned from the previous request to list the next or
+          previous page of events respectively. If page_token is empty, the first page is returned.
         :param start_time: int (optional)
           The start time in epoch milliseconds. If empty, returns events starting from the beginning of time.
 
@@ -10967,6 +11101,10 @@ class ClustersAPI:
             body["offset"] = offset
         if order is not None:
             body["order"] = order.value
+        if page_size is not None:
+            body["page_size"] = page_size
+        if page_token is not None:
+            body["page_token"] = page_token
         if start_time is not None:
             body["start_time"] = start_time
         headers = {
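
As before, `events` returns `Iterator[ClusterEvent]` and keeps fetching pages on the caller's behalf, so `page_size` and `page_token` shape the underlying requests rather than truncating the iteration. A sketch, with an illustrative cluster id:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import EventType

w = WorkspaceClient()

for event in w.clusters.events(
    cluster_id="1234-567890-abcde123",  # illustrative id
    event_types=[EventType.RUNNING, EventType.TERMINATING],
    page_size=100,  # preferred over the deprecated limit/offset pair
):
    print(event.timestamp, event.type, event.details)
```
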
@@ -12082,6 +12220,7 @@ class InstancePoolsAPI:
         idle_instance_autotermination_minutes: Optional[int] = None,
         max_capacity: Optional[int] = None,
         min_idle_instances: Optional[int] = None,
+        node_type_flexibility: Optional[NodeTypeFlexibility] = None,
     ):
         """Edit an existing instance pool.
 
@@ -12114,6 +12253,9 @@ class InstancePoolsAPI:
           upsize requests.
         :param min_idle_instances: int (optional)
           Minimum number of idle instances to keep in the instance pool
+        :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional)
+          For Fleet-pool V2, this object contains the information about the alternate node type ids to use
+          when attempting to launch a cluster if the node type id is not available.
 
 
         """
@@ -12130,6 +12272,8 @@ class InstancePoolsAPI:
             body["max_capacity"] = max_capacity
         if min_idle_instances is not None:
             body["min_idle_instances"] = min_idle_instances
+        if node_type_flexibility is not None:
+            body["node_type_flexibility"] = node_type_flexibility.as_dict()
         if node_type_id is not None:
             body["node_type_id"] = node_type_id
         headers = {
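
Putting the instance pool changes together: opting an existing pool into Fleet-pool V2 node type flexibility amounts to passing the new (currently empty) message to `edit`. A sketch; every value other than `node_type_flexibility` is illustrative:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import NodeTypeFlexibility

w = WorkspaceClient()

w.instance_pools.edit(
    instance_pool_id="0123-456789-pool00",
    instance_pool_name="fleet-pool-v2",
    node_type_id="m5.xlarge",
    min_idle_instances=1,
    node_type_flexibility=NodeTypeFlexibility(),  # serialized as {}
)
```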