databricks-sdk 0.27.1__py3-none-any.whl → 0.29.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of databricks-sdk might be problematic. Click here for more details.

Files changed (32) hide show
  1. databricks/sdk/__init__.py +16 -12
  2. databricks/sdk/azure.py +0 -27
  3. databricks/sdk/config.py +71 -19
  4. databricks/sdk/core.py +27 -0
  5. databricks/sdk/credentials_provider.py +121 -44
  6. databricks/sdk/dbutils.py +81 -3
  7. databricks/sdk/environments.py +34 -1
  8. databricks/sdk/errors/__init__.py +1 -0
  9. databricks/sdk/errors/mapper.py +4 -0
  10. databricks/sdk/errors/private_link.py +60 -0
  11. databricks/sdk/oauth.py +8 -6
  12. databricks/sdk/service/catalog.py +774 -632
  13. databricks/sdk/service/compute.py +91 -116
  14. databricks/sdk/service/dashboards.py +707 -2
  15. databricks/sdk/service/jobs.py +126 -163
  16. databricks/sdk/service/marketplace.py +145 -31
  17. databricks/sdk/service/oauth2.py +22 -0
  18. databricks/sdk/service/pipelines.py +119 -4
  19. databricks/sdk/service/serving.py +217 -64
  20. databricks/sdk/service/settings.py +1 -0
  21. databricks/sdk/service/sharing.py +36 -2
  22. databricks/sdk/service/sql.py +103 -24
  23. databricks/sdk/service/vectorsearch.py +263 -1
  24. databricks/sdk/service/workspace.py +8 -4
  25. databricks/sdk/version.py +1 -1
  26. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/METADATA +2 -1
  27. databricks_sdk-0.29.0.dist-info/RECORD +57 -0
  28. databricks_sdk-0.27.1.dist-info/RECORD +0 -56
  29. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/LICENSE +0 -0
  30. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/NOTICE +0 -0
  31. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/WHEEL +0 -0
  32. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/top_level.txt +0 -0
@@ -448,7 +448,7 @@ class CatalogInfo:
448
448
  full_name: Optional[str] = None
449
449
  """The full name of the catalog. Corresponds with the name field."""
450
450
 
451
- isolation_mode: Optional[IsolationMode] = None
451
+ isolation_mode: Optional[CatalogIsolationMode] = None
452
452
  """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
453
453
 
454
454
  metastore_id: Optional[str] = None
@@ -541,7 +541,7 @@ class CatalogInfo:
541
541
  enable_predictive_optimization=_enum(d, 'enable_predictive_optimization',
542
542
  EnablePredictiveOptimization),
543
543
  full_name=d.get('full_name', None),
544
- isolation_mode=_enum(d, 'isolation_mode', IsolationMode),
544
+ isolation_mode=_enum(d, 'isolation_mode', CatalogIsolationMode),
545
545
  metastore_id=d.get('metastore_id', None),
546
546
  name=d.get('name', None),
547
547
  options=d.get('options', None),
@@ -571,13 +571,18 @@ class CatalogInfoSecurableKind(Enum):
571
571
  CATALOG_FOREIGN_SQLDW = 'CATALOG_FOREIGN_SQLDW'
572
572
  CATALOG_FOREIGN_SQLSERVER = 'CATALOG_FOREIGN_SQLSERVER'
573
573
  CATALOG_INTERNAL = 'CATALOG_INTERNAL'
574
- CATALOG_ONLINE = 'CATALOG_ONLINE'
575
- CATALOG_ONLINE_INDEX = 'CATALOG_ONLINE_INDEX'
576
574
  CATALOG_STANDARD = 'CATALOG_STANDARD'
577
575
  CATALOG_SYSTEM = 'CATALOG_SYSTEM'
578
576
  CATALOG_SYSTEM_DELTASHARING = 'CATALOG_SYSTEM_DELTASHARING'
579
577
 
580
578
 
579
+ class CatalogIsolationMode(Enum):
580
+ """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
581
+
582
+ ISOLATED = 'ISOLATED'
583
+ OPEN = 'OPEN'
584
+
585
+
581
586
  class CatalogType(Enum):
582
587
  """The type of the catalog."""
583
588
 
@@ -1222,8 +1227,9 @@ class CreateMetastore:
1222
1227
  """The user-specified name of the metastore."""
1223
1228
 
1224
1229
  region: Optional[str] = None
1225
- """Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). If this field is omitted,
1226
- the region of the workspace receiving the request will be used."""
1230
+ """Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). The field can be omitted
1231
+ in the __workspace-level__ __API__ but not in the __account-level__ __API__. If this field is
1232
+ omitted, the region of the workspace receiving the request will be used."""
1227
1233
 
1228
1234
  storage_root: Optional[str] = None
1229
1235
  """The storage root URL for metastore"""
@@ -1494,7 +1500,7 @@ class CreateStorageCredential:
1494
1500
  """Comment associated with the credential."""
1495
1501
 
1496
1502
  databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountRequest] = None
1497
- """The <Databricks> managed GCP service account configuration."""
1503
+ """The Databricks managed GCP service account configuration."""
1498
1504
 
1499
1505
  read_only: Optional[bool] = None
1500
1506
  """Whether the storage credential is only usable for read operations."""
@@ -1627,14 +1633,28 @@ class DataSourceFormat(Enum):
1627
1633
  """Data source format"""
1628
1634
 
1629
1635
  AVRO = 'AVRO'
1636
+ BIGQUERY_FORMAT = 'BIGQUERY_FORMAT'
1630
1637
  CSV = 'CSV'
1638
+ DATABRICKS_FORMAT = 'DATABRICKS_FORMAT'
1631
1639
  DELTA = 'DELTA'
1632
1640
  DELTASHARING = 'DELTASHARING'
1641
+ HIVE_CUSTOM = 'HIVE_CUSTOM'
1642
+ HIVE_SERDE = 'HIVE_SERDE'
1633
1643
  JSON = 'JSON'
1644
+ MYSQL_FORMAT = 'MYSQL_FORMAT'
1645
+ NETSUITE_FORMAT = 'NETSUITE_FORMAT'
1634
1646
  ORC = 'ORC'
1635
1647
  PARQUET = 'PARQUET'
1648
+ POSTGRESQL_FORMAT = 'POSTGRESQL_FORMAT'
1649
+ REDSHIFT_FORMAT = 'REDSHIFT_FORMAT'
1650
+ SALESFORCE_FORMAT = 'SALESFORCE_FORMAT'
1651
+ SNOWFLAKE_FORMAT = 'SNOWFLAKE_FORMAT'
1652
+ SQLDW_FORMAT = 'SQLDW_FORMAT'
1653
+ SQLSERVER_FORMAT = 'SQLSERVER_FORMAT'
1636
1654
  TEXT = 'TEXT'
1637
1655
  UNITY_CATALOG = 'UNITY_CATALOG'
1656
+ VECTOR_INDEX_FORMAT = 'VECTOR_INDEX_FORMAT'
1657
+ WORKDAY_RAAS_FORMAT = 'WORKDAY_RAAS_FORMAT'
1638
1658
 
1639
1659
 
1640
1660
  @dataclass
@@ -1778,14 +1798,6 @@ class DisableResponse:
1778
1798
  return cls()
1779
1799
 
1780
1800
 
1781
- class DisableSchemaName(Enum):
1782
-
1783
- ACCESS = 'access'
1784
- BILLING = 'billing'
1785
- LINEAGE = 'lineage'
1786
- OPERATIONAL_DATA = 'operational_data'
1787
-
1788
-
1789
1801
  @dataclass
1790
1802
  class EffectivePermissionsList:
1791
1803
  privilege_assignments: Optional[List[EffectivePrivilegeAssignment]] = None
@@ -1916,14 +1928,6 @@ class EnableResponse:
1916
1928
  return cls()
1917
1929
 
1918
1930
 
1919
- class EnableSchemaName(Enum):
1920
-
1921
- ACCESS = 'access'
1922
- BILLING = 'billing'
1923
- LINEAGE = 'lineage'
1924
- OPERATIONAL_DATA = 'operational_data'
1925
-
1926
-
1927
1931
  @dataclass
1928
1932
  class EncryptionDetails:
1929
1933
  """Encryption options that apply to clients connecting to cloud storage."""
@@ -1970,6 +1974,9 @@ class ExternalLocationInfo:
1970
1974
  encryption_details: Optional[EncryptionDetails] = None
1971
1975
  """Encryption options that apply to clients connecting to cloud storage."""
1972
1976
 
1977
+ isolation_mode: Optional[IsolationMode] = None
1978
+ """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
1979
+
1973
1980
  metastore_id: Optional[str] = None
1974
1981
  """Unique identifier of metastore hosting the external location."""
1975
1982
 
@@ -2002,6 +2009,7 @@ class ExternalLocationInfo:
2002
2009
  if self.credential_id is not None: body['credential_id'] = self.credential_id
2003
2010
  if self.credential_name is not None: body['credential_name'] = self.credential_name
2004
2011
  if self.encryption_details: body['encryption_details'] = self.encryption_details.as_dict()
2012
+ if self.isolation_mode is not None: body['isolation_mode'] = self.isolation_mode.value
2005
2013
  if self.metastore_id is not None: body['metastore_id'] = self.metastore_id
2006
2014
  if self.name is not None: body['name'] = self.name
2007
2015
  if self.owner is not None: body['owner'] = self.owner
@@ -2022,6 +2030,7 @@ class ExternalLocationInfo:
2022
2030
  credential_id=d.get('credential_id', None),
2023
2031
  credential_name=d.get('credential_name', None),
2024
2032
  encryption_details=_from_dict(d, 'encryption_details', EncryptionDetails),
2033
+ isolation_mode=_enum(d, 'isolation_mode', IsolationMode),
2025
2034
  metastore_id=d.get('metastore_id', None),
2026
2035
  name=d.get('name', None),
2027
2036
  owner=d.get('owner', None),
@@ -2531,8 +2540,8 @@ class GetMetastoreSummaryResponseDeltaSharingScope(Enum):
2531
2540
  class IsolationMode(Enum):
2532
2541
  """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
2533
2542
 
2534
- ISOLATED = 'ISOLATED'
2535
- OPEN = 'OPEN'
2543
+ ISOLATION_MODE_ISOLATED = 'ISOLATION_MODE_ISOLATED'
2544
+ ISOLATION_MODE_OPEN = 'ISOLATION_MODE_OPEN'
2536
2545
 
2537
2546
 
2538
2547
  @dataclass
@@ -2553,21 +2562,45 @@ class ListAccountMetastoreAssignmentsResponse:
2553
2562
  return cls(workspace_ids=d.get('workspace_ids', None))
2554
2563
 
2555
2564
 
2565
+ @dataclass
2566
+ class ListAccountStorageCredentialsResponse:
2567
+ storage_credentials: Optional[List[StorageCredentialInfo]] = None
2568
+ """An array of metastore storage credentials."""
2569
+
2570
+ def as_dict(self) -> dict:
2571
+ """Serializes the ListAccountStorageCredentialsResponse into a dictionary suitable for use as a JSON request body."""
2572
+ body = {}
2573
+ if self.storage_credentials:
2574
+ body['storage_credentials'] = [v.as_dict() for v in self.storage_credentials]
2575
+ return body
2576
+
2577
+ @classmethod
2578
+ def from_dict(cls, d: Dict[str, any]) -> ListAccountStorageCredentialsResponse:
2579
+ """Deserializes the ListAccountStorageCredentialsResponse from a dictionary."""
2580
+ return cls(storage_credentials=_repeated_dict(d, 'storage_credentials', StorageCredentialInfo))
2581
+
2582
+
2556
2583
  @dataclass
2557
2584
  class ListCatalogsResponse:
2558
2585
  catalogs: Optional[List[CatalogInfo]] = None
2559
2586
  """An array of catalog information objects."""
2560
2587
 
2588
+ next_page_token: Optional[str] = None
2589
+ """Opaque token to retrieve the next page of results. Absent if there are no more pages.
2590
+ __page_token__ should be set to this value for the next request (for the next page of results)."""
2591
+
2561
2592
  def as_dict(self) -> dict:
2562
2593
  """Serializes the ListCatalogsResponse into a dictionary suitable for use as a JSON request body."""
2563
2594
  body = {}
2564
2595
  if self.catalogs: body['catalogs'] = [v.as_dict() for v in self.catalogs]
2596
+ if self.next_page_token is not None: body['next_page_token'] = self.next_page_token
2565
2597
  return body
2566
2598
 
2567
2599
  @classmethod
2568
2600
  def from_dict(cls, d: Dict[str, any]) -> ListCatalogsResponse:
2569
2601
  """Deserializes the ListCatalogsResponse from a dictionary."""
2570
- return cls(catalogs=_repeated_dict(d, 'catalogs', CatalogInfo))
2602
+ return cls(catalogs=_repeated_dict(d, 'catalogs', CatalogInfo),
2603
+ next_page_token=d.get('next_page_token', None))
2571
2604
 
2572
2605
 
2573
2606
  @dataclass
@@ -2575,16 +2608,22 @@ class ListConnectionsResponse:
2575
2608
  connections: Optional[List[ConnectionInfo]] = None
2576
2609
  """An array of connection information objects."""
2577
2610
 
2611
+ next_page_token: Optional[str] = None
2612
+ """Opaque token to retrieve the next page of results. Absent if there are no more pages.
2613
+ __page_token__ should be set to this value for the next request (for the next page of results)."""
2614
+
2578
2615
  def as_dict(self) -> dict:
2579
2616
  """Serializes the ListConnectionsResponse into a dictionary suitable for use as a JSON request body."""
2580
2617
  body = {}
2581
2618
  if self.connections: body['connections'] = [v.as_dict() for v in self.connections]
2619
+ if self.next_page_token is not None: body['next_page_token'] = self.next_page_token
2582
2620
  return body
2583
2621
 
2584
2622
  @classmethod
2585
2623
  def from_dict(cls, d: Dict[str, any]) -> ListConnectionsResponse:
2586
2624
  """Deserializes the ListConnectionsResponse from a dictionary."""
2587
- return cls(connections=_repeated_dict(d, 'connections', ConnectionInfo))
2625
+ return cls(connections=_repeated_dict(d, 'connections', ConnectionInfo),
2626
+ next_page_token=d.get('next_page_token', None))
2588
2627
 
2589
2628
 
2590
2629
  @dataclass
@@ -3500,6 +3539,23 @@ class MonitorRefreshInfoTrigger(Enum):
3500
3539
  SCHEDULE = 'SCHEDULE'
3501
3540
 
3502
3541
 
3542
+ @dataclass
3543
+ class MonitorRefreshListResponse:
3544
+ refreshes: Optional[List[MonitorRefreshInfo]] = None
3545
+ """List of refreshes."""
3546
+
3547
+ def as_dict(self) -> dict:
3548
+ """Serializes the MonitorRefreshListResponse into a dictionary suitable for use as a JSON request body."""
3549
+ body = {}
3550
+ if self.refreshes: body['refreshes'] = [v.as_dict() for v in self.refreshes]
3551
+ return body
3552
+
3553
+ @classmethod
3554
+ def from_dict(cls, d: Dict[str, any]) -> MonitorRefreshListResponse:
3555
+ """Deserializes the MonitorRefreshListResponse from a dictionary."""
3556
+ return cls(refreshes=_repeated_dict(d, 'refreshes', MonitorRefreshInfo))
3557
+
3558
+
3503
3559
  @dataclass
3504
3560
  class MonitorSnapshot:
3505
3561
 
@@ -3571,12 +3627,16 @@ class OnlineTable:
3571
3627
  status: Optional[OnlineTableStatus] = None
3572
3628
  """Online Table status"""
3573
3629
 
3630
+ table_serving_url: Optional[str] = None
3631
+ """Data serving REST API URL for this table"""
3632
+
3574
3633
  def as_dict(self) -> dict:
3575
3634
  """Serializes the OnlineTable into a dictionary suitable for use as a JSON request body."""
3576
3635
  body = {}
3577
3636
  if self.name is not None: body['name'] = self.name
3578
3637
  if self.spec: body['spec'] = self.spec.as_dict()
3579
3638
  if self.status: body['status'] = self.status.as_dict()
3639
+ if self.table_serving_url is not None: body['table_serving_url'] = self.table_serving_url
3580
3640
  return body
3581
3641
 
3582
3642
  @classmethod
@@ -3584,7 +3644,8 @@ class OnlineTable:
3584
3644
  """Deserializes the OnlineTable from a dictionary."""
3585
3645
  return cls(name=d.get('name', None),
3586
3646
  spec=_from_dict(d, 'spec', OnlineTableSpec),
3587
- status=_from_dict(d, 'status', OnlineTableStatus))
3647
+ status=_from_dict(d, 'status', OnlineTableStatus),
3648
+ table_serving_url=d.get('table_serving_url', None))
3588
3649
 
3589
3650
 
3590
3651
  @dataclass
@@ -4310,11 +4371,14 @@ class StorageCredentialInfo:
4310
4371
  """Username of credential creator."""
4311
4372
 
4312
4373
  databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountResponse] = None
4313
- """The <Databricks> managed GCP service account configuration."""
4374
+ """The Databricks managed GCP service account configuration."""
4314
4375
 
4315
4376
  id: Optional[str] = None
4316
4377
  """The unique identifier of the credential."""
4317
4378
 
4379
+ isolation_mode: Optional[IsolationMode] = None
4380
+ """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
4381
+
4318
4382
  metastore_id: Optional[str] = None
4319
4383
  """Unique identifier of parent metastore."""
4320
4384
 
@@ -4350,6 +4414,7 @@ class StorageCredentialInfo:
4350
4414
  if self.databricks_gcp_service_account:
4351
4415
  body['databricks_gcp_service_account'] = self.databricks_gcp_service_account.as_dict()
4352
4416
  if self.id is not None: body['id'] = self.id
4417
+ if self.isolation_mode is not None: body['isolation_mode'] = self.isolation_mode.value
4353
4418
  if self.metastore_id is not None: body['metastore_id'] = self.metastore_id
4354
4419
  if self.name is not None: body['name'] = self.name
4355
4420
  if self.owner is not None: body['owner'] = self.owner
@@ -4374,6 +4439,7 @@ class StorageCredentialInfo:
4374
4439
  databricks_gcp_service_account=_from_dict(d, 'databricks_gcp_service_account',
4375
4440
  DatabricksGcpServiceAccountResponse),
4376
4441
  id=d.get('id', None),
4442
+ isolation_mode=_enum(d, 'isolation_mode', IsolationMode),
4377
4443
  metastore_id=d.get('metastore_id', None),
4378
4444
  name=d.get('name', None),
4379
4445
  owner=d.get('owner', None),
@@ -4712,7 +4778,10 @@ class TableSummary:
4712
4778
  class TableType(Enum):
4713
4779
 
4714
4780
  EXTERNAL = 'EXTERNAL'
4781
+ EXTERNAL_SHALLOW_CLONE = 'EXTERNAL_SHALLOW_CLONE'
4782
+ FOREIGN = 'FOREIGN'
4715
4783
  MANAGED = 'MANAGED'
4784
+ MANAGED_SHALLOW_CLONE = 'MANAGED_SHALLOW_CLONE'
4716
4785
  MATERIALIZED_VIEW = 'MATERIALIZED_VIEW'
4717
4786
  STREAMING_TABLE = 'STREAMING_TABLE'
4718
4787
  VIEW = 'VIEW'
@@ -4788,7 +4857,7 @@ class UpdateCatalog:
4788
4857
  enable_predictive_optimization: Optional[EnablePredictiveOptimization] = None
4789
4858
  """Whether predictive optimization should be enabled for this object and objects under it."""
4790
4859
 
4791
- isolation_mode: Optional[IsolationMode] = None
4860
+ isolation_mode: Optional[CatalogIsolationMode] = None
4792
4861
  """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
4793
4862
 
4794
4863
  name: Optional[str] = None
@@ -4822,7 +4891,7 @@ class UpdateCatalog:
4822
4891
  return cls(comment=d.get('comment', None),
4823
4892
  enable_predictive_optimization=_enum(d, 'enable_predictive_optimization',
4824
4893
  EnablePredictiveOptimization),
4825
- isolation_mode=_enum(d, 'isolation_mode', IsolationMode),
4894
+ isolation_mode=_enum(d, 'isolation_mode', CatalogIsolationMode),
4826
4895
  name=d.get('name', None),
4827
4896
  new_name=d.get('new_name', None),
4828
4897
  owner=d.get('owner', None),
@@ -4878,6 +4947,9 @@ class UpdateExternalLocation:
4878
4947
  force: Optional[bool] = None
4879
4948
  """Force update even if changing url invalidates dependent external tables or mounts."""
4880
4949
 
4950
+ isolation_mode: Optional[IsolationMode] = None
4951
+ """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
4952
+
4881
4953
  name: Optional[str] = None
4882
4954
  """Name of the external location."""
4883
4955
 
@@ -4904,6 +4976,7 @@ class UpdateExternalLocation:
4904
4976
  if self.credential_name is not None: body['credential_name'] = self.credential_name
4905
4977
  if self.encryption_details: body['encryption_details'] = self.encryption_details.as_dict()
4906
4978
  if self.force is not None: body['force'] = self.force
4979
+ if self.isolation_mode is not None: body['isolation_mode'] = self.isolation_mode.value
4907
4980
  if self.name is not None: body['name'] = self.name
4908
4981
  if self.new_name is not None: body['new_name'] = self.new_name
4909
4982
  if self.owner is not None: body['owner'] = self.owner
@@ -4920,6 +4993,7 @@ class UpdateExternalLocation:
4920
4993
  credential_name=d.get('credential_name', None),
4921
4994
  encryption_details=_from_dict(d, 'encryption_details', EncryptionDetails),
4922
4995
  force=d.get('force', None),
4996
+ isolation_mode=_enum(d, 'isolation_mode', IsolationMode),
4923
4997
  name=d.get('name', None),
4924
4998
  new_name=d.get('new_name', None),
4925
4999
  owner=d.get('owner', None),
@@ -5084,6 +5158,10 @@ class UpdateMonitor:
5084
5158
  metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across
5085
5159
  time windows)."""
5086
5160
 
5161
+ dashboard_id: Optional[str] = None
5162
+ """Id of dashboard that visualizes the computed metrics. This can be empty if the monitor is in
5163
+ PENDING state."""
5164
+
5087
5165
  data_classification_config: Optional[MonitorDataClassificationConfig] = None
5088
5166
  """The data classification config for the monitor."""
5089
5167
 
@@ -5115,6 +5193,7 @@ class UpdateMonitor:
5115
5193
  body = {}
5116
5194
  if self.baseline_table_name is not None: body['baseline_table_name'] = self.baseline_table_name
5117
5195
  if self.custom_metrics: body['custom_metrics'] = [v.as_dict() for v in self.custom_metrics]
5196
+ if self.dashboard_id is not None: body['dashboard_id'] = self.dashboard_id
5118
5197
  if self.data_classification_config:
5119
5198
  body['data_classification_config'] = self.data_classification_config.as_dict()
5120
5199
  if self.inference_log: body['inference_log'] = self.inference_log.as_dict()
@@ -5132,6 +5211,7 @@ class UpdateMonitor:
5132
5211
  """Deserializes the UpdateMonitor from a dictionary."""
5133
5212
  return cls(baseline_table_name=d.get('baseline_table_name', None),
5134
5213
  custom_metrics=_repeated_dict(d, 'custom_metrics', MonitorMetric),
5214
+ dashboard_id=d.get('dashboard_id', None),
5135
5215
  data_classification_config=_from_dict(d, 'data_classification_config',
5136
5216
  MonitorDataClassificationConfig),
5137
5217
  inference_log=_from_dict(d, 'inference_log', MonitorInferenceLog),
@@ -5279,11 +5359,14 @@ class UpdateStorageCredential:
5279
5359
  """Comment associated with the credential."""
5280
5360
 
5281
5361
  databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountRequest] = None
5282
- """The <Databricks> managed GCP service account configuration."""
5362
+ """The Databricks managed GCP service account configuration."""
5283
5363
 
5284
5364
  force: Optional[bool] = None
5285
5365
  """Force update even if there are dependent external locations or external tables."""
5286
5366
 
5367
+ isolation_mode: Optional[IsolationMode] = None
5368
+ """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
5369
+
5287
5370
  name: Optional[str] = None
5288
5371
  """Name of the storage credential."""
5289
5372
 
@@ -5311,6 +5394,7 @@ class UpdateStorageCredential:
5311
5394
  if self.databricks_gcp_service_account:
5312
5395
  body['databricks_gcp_service_account'] = self.databricks_gcp_service_account.as_dict()
5313
5396
  if self.force is not None: body['force'] = self.force
5397
+ if self.isolation_mode is not None: body['isolation_mode'] = self.isolation_mode.value
5314
5398
  if self.name is not None: body['name'] = self.name
5315
5399
  if self.new_name is not None: body['new_name'] = self.new_name
5316
5400
  if self.owner is not None: body['owner'] = self.owner
@@ -5330,6 +5414,7 @@ class UpdateStorageCredential:
5330
5414
  databricks_gcp_service_account=_from_dict(d, 'databricks_gcp_service_account',
5331
5415
  DatabricksGcpServiceAccountRequest),
5332
5416
  force=d.get('force', None),
5417
+ isolation_mode=_enum(d, 'isolation_mode', IsolationMode),
5333
5418
  name=d.get('name', None),
5334
5419
  new_name=d.get('new_name', None),
5335
5420
  owner=d.get('owner', None),
@@ -6027,11 +6112,12 @@ class AccountStorageCredentialsAPI:
6027
6112
 
6028
6113
  headers = {'Accept': 'application/json', }
6029
6114
 
6030
- res = self._api.do(
6115
+ json = self._api.do(
6031
6116
  'GET',
6032
6117
  f'/api/2.0/accounts/{self._api.account_id}/metastores/{metastore_id}/storage-credentials',
6033
6118
  headers=headers)
6034
- return [StorageCredentialInfo.from_dict(v) for v in res]
6119
+ parsed = ListAccountStorageCredentialsResponse.from_dict(json).storage_credentials
6120
+ return parsed if parsed is not None else []
6035
6121
 
6036
6122
  def update(self,
6037
6123
  metastore_id: str,
@@ -6218,7 +6304,11 @@ class CatalogsAPI:
6218
6304
  res = self._api.do('GET', f'/api/2.1/unity-catalog/catalogs/{name}', query=query, headers=headers)
6219
6305
  return CatalogInfo.from_dict(res)
6220
6306
 
6221
- def list(self, *, include_browse: Optional[bool] = None) -> Iterator[CatalogInfo]:
6307
+ def list(self,
6308
+ *,
6309
+ include_browse: Optional[bool] = None,
6310
+ max_results: Optional[int] = None,
6311
+ page_token: Optional[str] = None) -> Iterator[CatalogInfo]:
6222
6312
  """List catalogs.
6223
6313
 
6224
6314
  Gets an array of catalogs in the metastore. If the caller is the metastore admin, all catalogs will be
@@ -6229,24 +6319,41 @@ class CatalogsAPI:
6229
6319
  :param include_browse: bool (optional)
6230
6320
  Whether to include catalogs in the response for which the principal can only access selective
6231
6321
  metadata for
6322
+ :param max_results: int (optional)
6323
+ Maximum number of catalogs to return. - when set to 0, the page length is set to a server configured
6324
+ value (recommended); - when set to a value greater than 0, the page length is the minimum of this
6325
+ value and a server configured value; - when set to a value less than 0, an invalid parameter error
6326
+ is returned; - If not set, all valid catalogs are returned (not recommended). - Note: The number of
6327
+ returned catalogs might be less than the specified max_results size, even zero. The only definitive
6328
+ indication that no further catalogs can be fetched is when the next_page_token is unset from the
6329
+ response.
6330
+ :param page_token: str (optional)
6331
+ Opaque pagination token to go to next page based on previous query.
6232
6332
 
6233
6333
  :returns: Iterator over :class:`CatalogInfo`
6234
6334
  """
6235
6335
 
6236
6336
  query = {}
6237
6337
  if include_browse is not None: query['include_browse'] = include_browse
6338
+ if max_results is not None: query['max_results'] = max_results
6339
+ if page_token is not None: query['page_token'] = page_token
6238
6340
  headers = {'Accept': 'application/json', }
6239
6341
 
6240
- json = self._api.do('GET', '/api/2.1/unity-catalog/catalogs', query=query, headers=headers)
6241
- parsed = ListCatalogsResponse.from_dict(json).catalogs
6242
- return parsed if parsed is not None else []
6342
+ while True:
6343
+ json = self._api.do('GET', '/api/2.1/unity-catalog/catalogs', query=query, headers=headers)
6344
+ if 'catalogs' in json:
6345
+ for v in json['catalogs']:
6346
+ yield CatalogInfo.from_dict(v)
6347
+ if 'next_page_token' not in json or not json['next_page_token']:
6348
+ return
6349
+ query['page_token'] = json['next_page_token']
6243
6350
 
6244
6351
  def update(self,
6245
6352
  name: str,
6246
6353
  *,
6247
6354
  comment: Optional[str] = None,
6248
6355
  enable_predictive_optimization: Optional[EnablePredictiveOptimization] = None,
6249
- isolation_mode: Optional[IsolationMode] = None,
6356
+ isolation_mode: Optional[CatalogIsolationMode] = None,
6250
6357
  new_name: Optional[str] = None,
6251
6358
  owner: Optional[str] = None,
6252
6359
  properties: Optional[Dict[str, str]] = None) -> CatalogInfo:
@@ -6261,7 +6368,7 @@ class CatalogsAPI:
6261
6368
  User-provided free-form text description.
6262
6369
  :param enable_predictive_optimization: :class:`EnablePredictiveOptimization` (optional)
6263
6370
  Whether predictive optimization should be enabled for this object and objects under it.
6264
- :param isolation_mode: :class:`IsolationMode` (optional)
6371
+ :param isolation_mode: :class:`CatalogIsolationMode` (optional)
6265
6372
  Whether the current securable is accessible from all workspaces or a specific set of workspaces.
6266
6373
  :param new_name: str (optional)
6267
6374
  New name for the catalog.
@@ -6372,19 +6479,38 @@ class ConnectionsAPI:
6372
6479
  res = self._api.do('GET', f'/api/2.1/unity-catalog/connections/{name}', headers=headers)
6373
6480
  return ConnectionInfo.from_dict(res)
6374
6481
 
6375
- def list(self) -> Iterator[ConnectionInfo]:
6482
+ def list(self,
6483
+ *,
6484
+ max_results: Optional[int] = None,
6485
+ page_token: Optional[str] = None) -> Iterator[ConnectionInfo]:
6376
6486
  """List connections.
6377
6487
 
6378
6488
  List all connections.
6379
6489
 
6490
+ :param max_results: int (optional)
6491
+ Maximum number of connections to return. - If not set, all connections are returned (not
6492
+ recommended). - when set to a value greater than 0, the page length is the minimum of this value and
6493
+ a server configured value; - when set to 0, the page length is set to a server configured value
6494
+ (recommended); - when set to a value less than 0, an invalid parameter error is returned;
6495
+ :param page_token: str (optional)
6496
+ Opaque pagination token to go to next page based on previous query.
6497
+
6380
6498
  :returns: Iterator over :class:`ConnectionInfo`
6381
6499
  """
6382
6500
 
6501
+ query = {}
6502
+ if max_results is not None: query['max_results'] = max_results
6503
+ if page_token is not None: query['page_token'] = page_token
6383
6504
  headers = {'Accept': 'application/json', }
6384
6505
 
6385
- json = self._api.do('GET', '/api/2.1/unity-catalog/connections', headers=headers)
6386
- parsed = ListConnectionsResponse.from_dict(json).connections
6387
- return parsed if parsed is not None else []
6506
+ while True:
6507
+ json = self._api.do('GET', '/api/2.1/unity-catalog/connections', query=query, headers=headers)
6508
+ if 'connections' in json:
6509
+ for v in json['connections']:
6510
+ yield ConnectionInfo.from_dict(v)
6511
+ if 'next_page_token' not in json or not json['next_page_token']:
6512
+ return
6513
+ query['page_token'] = json['next_page_token']
6388
6514
 
6389
6515
  def update(self,
6390
6516
  name: str,
@@ -6580,6 +6706,7 @@ class ExternalLocationsAPI:
6580
6706
  credential_name: Optional[str] = None,
6581
6707
  encryption_details: Optional[EncryptionDetails] = None,
6582
6708
  force: Optional[bool] = None,
6709
+ isolation_mode: Optional[IsolationMode] = None,
6583
6710
  new_name: Optional[str] = None,
6584
6711
  owner: Optional[str] = None,
6585
6712
  read_only: Optional[bool] = None,
@@ -6603,6 +6730,8 @@ class ExternalLocationsAPI:
6603
6730
  Encryption options that apply to clients connecting to cloud storage.
6604
6731
  :param force: bool (optional)
6605
6732
  Force update even if changing url invalidates dependent external tables or mounts.
6733
+ :param isolation_mode: :class:`IsolationMode` (optional)
6734
+ Whether the current securable is accessible from all workspaces or a specific set of workspaces.
6606
6735
  :param new_name: str (optional)
6607
6736
  New name for the external location.
6608
6737
  :param owner: str (optional)
@@ -6622,6 +6751,7 @@ class ExternalLocationsAPI:
6622
6751
  if credential_name is not None: body['credential_name'] = credential_name
6623
6752
  if encryption_details is not None: body['encryption_details'] = encryption_details.as_dict()
6624
6753
  if force is not None: body['force'] = force
6754
+ if isolation_mode is not None: body['isolation_mode'] = isolation_mode.value
6625
6755
  if new_name is not None: body['new_name'] = new_name
6626
6756
  if owner is not None: body['owner'] = owner
6627
6757
  if read_only is not None: body['read_only'] = read_only
@@ -6649,6 +6779,8 @@ class FunctionsAPI:
6649
6779
  def create(self, function_info: CreateFunction) -> FunctionInfo:
6650
6780
  """Create a function.
6651
6781
 
6782
+ **WARNING: This API is experimental and will change in future versions**
6783
+
6652
6784
  Creates a new function
6653
6785
 
6654
6786
  The user must have the following permissions in order for the function to be created: -
@@ -6896,542 +7028,214 @@ class GrantsAPI:
6896
7028
  return PermissionsList.from_dict(res)
6897
7029
 
6898
7030
 
6899
- class LakehouseMonitorsAPI:
6900
- """A monitor computes and monitors data or model quality metrics for a table over time. It generates metrics
6901
- tables and a dashboard that you can use to monitor table health and set alerts.
7031
+ class MetastoresAPI:
7032
+ """A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and
7033
+ views) and the permissions that govern access to them. Databricks account admins can create metastores and
7034
+ assign them to Databricks workspaces to control which workloads use each metastore. For a workspace to use
7035
+ Unity Catalog, it must have a Unity Catalog metastore attached.
6902
7036
 
6903
- Most write operations require the user to be the owner of the table (or its parent schema or parent
6904
- catalog). Viewing the dashboard, computed metrics, or monitor configuration only requires the user to have
6905
- **SELECT** privileges on the table (along with **USE_SCHEMA** and **USE_CATALOG**)."""
7037
+ Each metastore is configured with a root storage location in a cloud storage account. This storage
7038
+ location is used for metadata and managed tables data.
7039
+
7040
+ NOTE: This metastore is distinct from the metastore included in Databricks workspaces created before Unity
7041
+ Catalog was released. If your workspace includes a legacy Hive metastore, the data in that metastore is
7042
+ available in a catalog named hive_metastore."""
6906
7043
 
6907
7044
  def __init__(self, api_client):
6908
7045
  self._api = api_client
6909
7046
 
6910
- def cancel_refresh(self, table_name: str, refresh_id: str):
6911
- """Cancel refresh.
6912
-
6913
- Cancel an active monitor refresh for the given refresh ID.
6914
-
6915
- The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
6916
- table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
6917
- - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
6918
- owner of the table
7047
+ def assign(self, workspace_id: int, metastore_id: str, default_catalog_name: str):
7048
+ """Create an assignment.
6919
7049
 
6920
- Additionally, the call must be made from the workspace where the monitor was created.
7050
+ Creates a new metastore assignment. If an assignment for the same __workspace_id__ exists, it will be
7051
+ overwritten by the new __metastore_id__ and __default_catalog_name__. The caller must be an account
7052
+ admin.
6921
7053
 
6922
- :param table_name: str
6923
- Full name of the table.
6924
- :param refresh_id: str
6925
- ID of the refresh.
7054
+ :param workspace_id: int
7055
+ A workspace ID.
7056
+ :param metastore_id: str
7057
+ The unique ID of the metastore.
7058
+ :param default_catalog_name: str
7059
+ The name of the default catalog in the metastore.
6926
7060
 
6927
7061
 
6928
7062
  """
7063
+ body = {}
7064
+ if default_catalog_name is not None: body['default_catalog_name'] = default_catalog_name
7065
+ if metastore_id is not None: body['metastore_id'] = metastore_id
7066
+ headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
6929
7067
 
6930
- headers = {}
6931
-
6932
- self._api.do('POST',
6933
- f'/api/2.1/unity-catalog/tables/{table_name}/monitor/refreshes/{refresh_id}/cancel',
7068
+ self._api.do('PUT',
7069
+ f'/api/2.1/unity-catalog/workspaces/{workspace_id}/metastore',
7070
+ body=body,
6934
7071
  headers=headers)
6935
7072
 
6936
7073
  def create(self,
6937
- table_name: str,
6938
- assets_dir: str,
6939
- output_schema_name: str,
7074
+ name: str,
6940
7075
  *,
6941
- baseline_table_name: Optional[str] = None,
6942
- custom_metrics: Optional[List[MonitorMetric]] = None,
6943
- data_classification_config: Optional[MonitorDataClassificationConfig] = None,
6944
- inference_log: Optional[MonitorInferenceLog] = None,
6945
- notifications: Optional[MonitorNotifications] = None,
6946
- schedule: Optional[MonitorCronSchedule] = None,
6947
- skip_builtin_dashboard: Optional[bool] = None,
6948
- slicing_exprs: Optional[List[str]] = None,
6949
- snapshot: Optional[MonitorSnapshot] = None,
6950
- time_series: Optional[MonitorTimeSeries] = None,
6951
- warehouse_id: Optional[str] = None) -> MonitorInfo:
6952
- """Create a table monitor.
7076
+ region: Optional[str] = None,
7077
+ storage_root: Optional[str] = None) -> MetastoreInfo:
7078
+ """Create a metastore.
6953
7079
 
6954
- Creates a new monitor for the specified table.
7080
+ Creates a new metastore based on a provided name and optional storage root path. By default (if the
7081
+ __owner__ field is not set), the owner of the new metastore is the user calling the
7082
+ __createMetastore__ API. If the __owner__ field is set to the empty string (**""**), the ownership is
7083
+ assigned to the System User instead.
6955
7084
 
6956
- The caller must either: 1. be an owner of the table's parent catalog, have **USE_SCHEMA** on the
6957
- table's parent schema, and have **SELECT** access on the table 2. have **USE_CATALOG** on the table's
6958
- parent catalog, be an owner of the table's parent schema, and have **SELECT** access on the table. 3.
6959
- have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on
6960
- the table's parent schema - be an owner of the table.
7085
+ :param name: str
7086
+ The user-specified name of the metastore.
7087
+ :param region: str (optional)
7088
+ Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). The field can be omitted in
7089
+ the __workspace-level__ __API__ but not in the __account-level__ __API__. If this field is omitted,
7090
+ the region of the workspace receiving the request will be used.
7091
+ :param storage_root: str (optional)
7092
+ The storage root URL for metastore
6961
7093
 
6962
- Workspace assets, such as the dashboard, will be created in the workspace where this call was made.
7094
+ :returns: :class:`MetastoreInfo`
7095
+ """
7096
+ body = {}
7097
+ if name is not None: body['name'] = name
7098
+ if region is not None: body['region'] = region
7099
+ if storage_root is not None: body['storage_root'] = storage_root
7100
+ headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7101
+
7102
+ res = self._api.do('POST', '/api/2.1/unity-catalog/metastores', body=body, headers=headers)
7103
+ return MetastoreInfo.from_dict(res)
7104
+
7105
+ def current(self) -> MetastoreAssignment:
7106
+ """Get metastore assignment for workspace.
6963
7107
 
6964
- :param table_name: str
6965
- Full name of the table.
6966
- :param assets_dir: str
6967
- The directory to store monitoring assets (e.g. dashboard, metric tables).
6968
- :param output_schema_name: str
6969
- Schema where output metric tables are created.
6970
- :param baseline_table_name: str (optional)
6971
- Name of the baseline table from which drift metrics are computed from. Columns in the monitored
6972
- table should also be present in the baseline table.
6973
- :param custom_metrics: List[:class:`MonitorMetric`] (optional)
6974
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics
6975
- (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
6976
- :param data_classification_config: :class:`MonitorDataClassificationConfig` (optional)
6977
- The data classification config for the monitor.
6978
- :param inference_log: :class:`MonitorInferenceLog` (optional)
6979
- Configuration for monitoring inference logs.
6980
- :param notifications: :class:`MonitorNotifications` (optional)
6981
- The notification settings for the monitor.
6982
- :param schedule: :class:`MonitorCronSchedule` (optional)
6983
- The schedule for automatically updating and refreshing metric tables.
6984
- :param skip_builtin_dashboard: bool (optional)
6985
- Whether to skip creating a default dashboard summarizing data quality metrics.
6986
- :param slicing_exprs: List[str] (optional)
6987
- List of column expressions to slice data with for targeted analysis. The data is grouped by each
6988
- expression independently, resulting in a separate slice for each predicate and its complements. For
6989
- high-cardinality columns, only the top 100 unique values by frequency will generate slices.
6990
- :param snapshot: :class:`MonitorSnapshot` (optional)
6991
- Configuration for monitoring snapshot tables.
6992
- :param time_series: :class:`MonitorTimeSeries` (optional)
6993
- Configuration for monitoring time series tables.
6994
- :param warehouse_id: str (optional)
6995
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first
6996
- running warehouse will be used.
7108
+ Gets the metastore assignment for the workspace being accessed.
6997
7109
 
6998
- :returns: :class:`MonitorInfo`
7110
+ :returns: :class:`MetastoreAssignment`
6999
7111
  """
7000
- body = {}
7001
- if assets_dir is not None: body['assets_dir'] = assets_dir
7002
- if baseline_table_name is not None: body['baseline_table_name'] = baseline_table_name
7003
- if custom_metrics is not None: body['custom_metrics'] = [v.as_dict() for v in custom_metrics]
7004
- if data_classification_config is not None:
7005
- body['data_classification_config'] = data_classification_config.as_dict()
7006
- if inference_log is not None: body['inference_log'] = inference_log.as_dict()
7007
- if notifications is not None: body['notifications'] = notifications.as_dict()
7008
- if output_schema_name is not None: body['output_schema_name'] = output_schema_name
7009
- if schedule is not None: body['schedule'] = schedule.as_dict()
7010
- if skip_builtin_dashboard is not None: body['skip_builtin_dashboard'] = skip_builtin_dashboard
7011
- if slicing_exprs is not None: body['slicing_exprs'] = [v for v in slicing_exprs]
7012
- if snapshot is not None: body['snapshot'] = snapshot.as_dict()
7013
- if time_series is not None: body['time_series'] = time_series.as_dict()
7014
- if warehouse_id is not None: body['warehouse_id'] = warehouse_id
7015
- headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7016
7112
 
7017
- res = self._api.do('POST',
7018
- f'/api/2.1/unity-catalog/tables/{table_name}/monitor',
7019
- body=body,
7020
- headers=headers)
7021
- return MonitorInfo.from_dict(res)
7113
+ headers = {'Accept': 'application/json', }
7022
7114
 
7023
- def delete(self, table_name: str):
7024
- """Delete a table monitor.
7025
-
7026
- Deletes a monitor for the specified table.
7027
-
7028
- The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7029
- table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7030
- - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
7031
- owner of the table.
7032
-
7033
- Additionally, the call must be made from the workspace where the monitor was created.
7115
+ res = self._api.do('GET', '/api/2.1/unity-catalog/current-metastore-assignment', headers=headers)
7116
+ return MetastoreAssignment.from_dict(res)
7117
+
7118
+ def delete(self, id: str, *, force: Optional[bool] = None):
7119
+ """Delete a metastore.
7034
7120
 
7035
- Note that the metric tables and dashboard will not be deleted as part of this call; those assets must
7036
- be manually cleaned up (if desired).
7121
+ Deletes a metastore. The caller must be a metastore admin.
7037
7122
 
7038
- :param table_name: str
7039
- Full name of the table.
7123
+ :param id: str
7124
+ Unique ID of the metastore.
7125
+ :param force: bool (optional)
7126
+ Force deletion even if the metastore is not empty. Default is false.
7040
7127
 
7041
7128
 
7042
7129
  """
7043
7130
 
7044
- headers = {}
7131
+ query = {}
7132
+ if force is not None: query['force'] = force
7133
+ headers = {'Accept': 'application/json', }
7045
7134
 
7046
- self._api.do('DELETE', f'/api/2.1/unity-catalog/tables/{table_name}/monitor', headers=headers)
7135
+ self._api.do('DELETE', f'/api/2.1/unity-catalog/metastores/{id}', query=query, headers=headers)
7047
7136
 
7048
- def get(self, table_name: str) -> MonitorInfo:
7049
- """Get a table monitor.
7050
-
7051
- Gets a monitor for the specified table.
7052
-
7053
- The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7054
- table's parent catalog and be an owner of the table's parent schema. 3. have the following
7055
- permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent
7056
- schema - **SELECT** privilege on the table.
7137
+ def get(self, id: str) -> MetastoreInfo:
7138
+ """Get a metastore.
7057
7139
 
7058
- The returned information includes configuration values, as well as information on assets created by
7059
- the monitor. Some information (e.g., dashboard) may be filtered out if the caller is in a different
7060
- workspace than where the monitor was created.
7140
+ Gets a metastore that matches the supplied ID. The caller must be a metastore admin to retrieve this
7141
+ info.
7061
7142
 
7062
- :param table_name: str
7063
- Full name of the table.
7143
+ :param id: str
7144
+ Unique ID of the metastore.
7064
7145
 
7065
- :returns: :class:`MonitorInfo`
7146
+ :returns: :class:`MetastoreInfo`
7066
7147
  """
7067
7148
 
7068
7149
  headers = {'Accept': 'application/json', }
7069
7150
 
7070
- res = self._api.do('GET', f'/api/2.1/unity-catalog/tables/{table_name}/monitor', headers=headers)
7071
- return MonitorInfo.from_dict(res)
7151
+ res = self._api.do('GET', f'/api/2.1/unity-catalog/metastores/{id}', headers=headers)
7152
+ return MetastoreInfo.from_dict(res)
7072
7153
 
7073
- def get_refresh(self, table_name: str, refresh_id: str) -> MonitorRefreshInfo:
7074
- """Get refresh.
7075
-
7076
- Gets info about a specific monitor refresh using the given refresh ID.
7077
-
7078
- The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7079
- table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7080
- - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema -
7081
- **SELECT** privilege on the table.
7082
-
7083
- Additionally, the call must be made from the workspace where the monitor was created.
7154
+ def list(self) -> Iterator[MetastoreInfo]:
7155
+ """List metastores.
7084
7156
 
7085
- :param table_name: str
7086
- Full name of the table.
7087
- :param refresh_id: str
7088
- ID of the refresh.
7157
+ Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin
7158
+ to retrieve this info. There is no guarantee of a specific ordering of the elements in the array.
7089
7159
 
7090
- :returns: :class:`MonitorRefreshInfo`
7160
+ :returns: Iterator over :class:`MetastoreInfo`
7091
7161
  """
7092
7162
 
7093
7163
  headers = {'Accept': 'application/json', }
7094
7164
 
7095
- res = self._api.do('GET',
7096
- f'/api/2.1/unity-catalog/tables/{table_name}/monitor/refreshes/{refresh_id}',
7097
- headers=headers)
7098
- return MonitorRefreshInfo.from_dict(res)
7165
+ json = self._api.do('GET', '/api/2.1/unity-catalog/metastores', headers=headers)
7166
+ parsed = ListMetastoresResponse.from_dict(json).metastores
7167
+ return parsed if parsed is not None else []
7099
7168
 
7100
- def list_refreshes(self, table_name: str) -> Iterator[MonitorRefreshInfo]:
7101
- """List refreshes.
7102
-
7103
- Gets an array containing the history of the most recent refreshes (up to 25) for this table.
7104
-
7105
- The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7106
- table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7107
- - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema -
7108
- **SELECT** privilege on the table.
7109
-
7110
- Additionally, the call must be made from the workspace where the monitor was created.
7169
+ def summary(self) -> GetMetastoreSummaryResponse:
7170
+ """Get a metastore summary.
7111
7171
 
7112
- :param table_name: str
7113
- Full name of the table.
7172
+ Gets information about a metastore. This summary includes the storage credential, the cloud vendor,
7173
+ the cloud region, and the global metastore ID.
7114
7174
 
7115
- :returns: Iterator over :class:`MonitorRefreshInfo`
7175
+ :returns: :class:`GetMetastoreSummaryResponse`
7116
7176
  """
7117
7177
 
7118
7178
  headers = {'Accept': 'application/json', }
7119
7179
 
7120
- res = self._api.do('GET',
7121
- f'/api/2.1/unity-catalog/tables/{table_name}/monitor/refreshes',
7122
- headers=headers)
7123
- return [MonitorRefreshInfo.from_dict(v) for v in res]
7180
+ res = self._api.do('GET', '/api/2.1/unity-catalog/metastore_summary', headers=headers)
7181
+ return GetMetastoreSummaryResponse.from_dict(res)
7124
7182
 
7125
- def run_refresh(self, table_name: str) -> MonitorRefreshInfo:
7126
- """Queue a metric refresh for a monitor.
7127
-
7128
- Queues a metric refresh on the monitor for the specified table. The refresh will execute in the
7129
- background.
7183
+ def unassign(self, workspace_id: int, metastore_id: str):
7184
+ """Delete an assignment.
7130
7185
 
7131
- The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7132
- table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7133
- - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
7134
- owner of the table
7186
+ Deletes a metastore assignment. The caller must be an account administrator.
7135
7187
 
7136
- Additionally, the call must be made from the workspace where the monitor was created.
7188
+ :param workspace_id: int
7189
+ A workspace ID.
7190
+ :param metastore_id: str
7191
+ Query for the ID of the metastore to delete.
7137
7192
 
7138
- :param table_name: str
7139
- Full name of the table.
7140
7193
 
7141
- :returns: :class:`MonitorRefreshInfo`
7142
7194
  """
7143
7195
 
7196
+ query = {}
7197
+ if metastore_id is not None: query['metastore_id'] = metastore_id
7144
7198
  headers = {'Accept': 'application/json', }
7145
7199
 
7146
- res = self._api.do('POST',
7147
- f'/api/2.1/unity-catalog/tables/{table_name}/monitor/refreshes',
7148
- headers=headers)
7149
- return MonitorRefreshInfo.from_dict(res)
7200
+ self._api.do('DELETE',
7201
+ f'/api/2.1/unity-catalog/workspaces/{workspace_id}/metastore',
7202
+ query=query,
7203
+ headers=headers)
7150
7204
 
7151
7205
  def update(self,
7152
- table_name: str,
7153
- output_schema_name: str,
7206
+ id: str,
7154
7207
  *,
7155
- baseline_table_name: Optional[str] = None,
7156
- custom_metrics: Optional[List[MonitorMetric]] = None,
7157
- data_classification_config: Optional[MonitorDataClassificationConfig] = None,
7158
- inference_log: Optional[MonitorInferenceLog] = None,
7159
- notifications: Optional[MonitorNotifications] = None,
7160
- schedule: Optional[MonitorCronSchedule] = None,
7161
- slicing_exprs: Optional[List[str]] = None,
7162
- snapshot: Optional[MonitorSnapshot] = None,
7163
- time_series: Optional[MonitorTimeSeries] = None) -> MonitorInfo:
7164
- """Update a table monitor.
7165
-
7166
- Updates a monitor for the specified table.
7167
-
7168
- The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7169
- table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7170
- - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
7171
- owner of the table.
7172
-
7173
- Additionally, the call must be made from the workspace where the monitor was created, and the caller
7174
- must be the original creator of the monitor.
7208
+ delta_sharing_organization_name: Optional[str] = None,
7209
+ delta_sharing_recipient_token_lifetime_in_seconds: Optional[int] = None,
7210
+ delta_sharing_scope: Optional[UpdateMetastoreDeltaSharingScope] = None,
7211
+ new_name: Optional[str] = None,
7212
+ owner: Optional[str] = None,
7213
+ privilege_model_version: Optional[str] = None,
7214
+ storage_root_credential_id: Optional[str] = None) -> MetastoreInfo:
7215
+ """Update a metastore.
7175
7216
 
7176
- Certain configuration fields, such as output asset identifiers, cannot be updated.
7217
+ Updates information for a specific metastore. The caller must be a metastore admin. If the __owner__
7218
+ field is set to the empty string (**""**), the ownership is updated to the System User.
7177
7219
 
7178
- :param table_name: str
7179
- Full name of the table.
7180
- :param output_schema_name: str
7181
- Schema where output metric tables are created.
7182
- :param baseline_table_name: str (optional)
7183
- Name of the baseline table from which drift metrics are computed from. Columns in the monitored
7184
- table should also be present in the baseline table.
7185
- :param custom_metrics: List[:class:`MonitorMetric`] (optional)
7186
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics
7187
- (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
7188
- :param data_classification_config: :class:`MonitorDataClassificationConfig` (optional)
7189
- The data classification config for the monitor.
7190
- :param inference_log: :class:`MonitorInferenceLog` (optional)
7191
- Configuration for monitoring inference logs.
7192
- :param notifications: :class:`MonitorNotifications` (optional)
7193
- The notification settings for the monitor.
7194
- :param schedule: :class:`MonitorCronSchedule` (optional)
7195
- The schedule for automatically updating and refreshing metric tables.
7196
- :param slicing_exprs: List[str] (optional)
7197
- List of column expressions to slice data with for targeted analysis. The data is grouped by each
7198
- expression independently, resulting in a separate slice for each predicate and its complements. For
7199
- high-cardinality columns, only the top 100 unique values by frequency will generate slices.
7200
- :param snapshot: :class:`MonitorSnapshot` (optional)
7201
- Configuration for monitoring snapshot tables.
7202
- :param time_series: :class:`MonitorTimeSeries` (optional)
7203
- Configuration for monitoring time series tables.
7220
+ :param id: str
7221
+ Unique ID of the metastore.
7222
+ :param delta_sharing_organization_name: str (optional)
7223
+ The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta
7224
+ Sharing as the official name.
7225
+ :param delta_sharing_recipient_token_lifetime_in_seconds: int (optional)
7226
+ The lifetime of delta sharing recipient token in seconds.
7227
+ :param delta_sharing_scope: :class:`UpdateMetastoreDeltaSharingScope` (optional)
7228
+ The scope of Delta Sharing enabled for the metastore.
7229
+ :param new_name: str (optional)
7230
+ New name for the metastore.
7231
+ :param owner: str (optional)
7232
+ The owner of the metastore.
7233
+ :param privilege_model_version: str (optional)
7234
+ Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`).
7235
+ :param storage_root_credential_id: str (optional)
7236
+ UUID of storage credential to access the metastore storage_root.
7204
7237
 
7205
- :returns: :class:`MonitorInfo`
7206
- """
7207
- body = {}
7208
- if baseline_table_name is not None: body['baseline_table_name'] = baseline_table_name
7209
- if custom_metrics is not None: body['custom_metrics'] = [v.as_dict() for v in custom_metrics]
7210
- if data_classification_config is not None:
7211
- body['data_classification_config'] = data_classification_config.as_dict()
7212
- if inference_log is not None: body['inference_log'] = inference_log.as_dict()
7213
- if notifications is not None: body['notifications'] = notifications.as_dict()
7214
- if output_schema_name is not None: body['output_schema_name'] = output_schema_name
7215
- if schedule is not None: body['schedule'] = schedule.as_dict()
7216
- if slicing_exprs is not None: body['slicing_exprs'] = [v for v in slicing_exprs]
7217
- if snapshot is not None: body['snapshot'] = snapshot.as_dict()
7218
- if time_series is not None: body['time_series'] = time_series.as_dict()
7219
- headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7220
-
7221
- res = self._api.do('PUT',
7222
- f'/api/2.1/unity-catalog/tables/{table_name}/monitor',
7223
- body=body,
7224
- headers=headers)
7225
- return MonitorInfo.from_dict(res)
7226
-
7227
-
7228
- class MetastoresAPI:
7229
- """A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and
7230
- views) and the permissions that govern access to them. Databricks account admins can create metastores and
7231
- assign them to Databricks workspaces to control which workloads use each metastore. For a workspace to use
7232
- Unity Catalog, it must have a Unity Catalog metastore attached.
7233
-
7234
- Each metastore is configured with a root storage location in a cloud storage account. This storage
7235
- location is used for metadata and managed tables data.
7236
-
7237
- NOTE: This metastore is distinct from the metastore included in Databricks workspaces created before Unity
7238
- Catalog was released. If your workspace includes a legacy Hive metastore, the data in that metastore is
7239
- available in a catalog named hive_metastore."""
7240
-
7241
- def __init__(self, api_client):
7242
- self._api = api_client
7243
-
7244
- def assign(self, workspace_id: int, metastore_id: str, default_catalog_name: str):
7245
- """Create an assignment.
7246
-
7247
- Creates a new metastore assignment. If an assignment for the same __workspace_id__ exists, it will be
7248
- overwritten by the new __metastore_id__ and __default_catalog_name__. The caller must be an account
7249
- admin.
7250
-
7251
- :param workspace_id: int
7252
- A workspace ID.
7253
- :param metastore_id: str
7254
- The unique ID of the metastore.
7255
- :param default_catalog_name: str
7256
- The name of the default catalog in the metastore.
7257
-
7258
-
7259
- """
7260
- body = {}
7261
- if default_catalog_name is not None: body['default_catalog_name'] = default_catalog_name
7262
- if metastore_id is not None: body['metastore_id'] = metastore_id
7263
- headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7264
-
7265
- self._api.do('PUT',
7266
- f'/api/2.1/unity-catalog/workspaces/{workspace_id}/metastore',
7267
- body=body,
7268
- headers=headers)
7269
-
7270
- def create(self,
7271
- name: str,
7272
- *,
7273
- region: Optional[str] = None,
7274
- storage_root: Optional[str] = None) -> MetastoreInfo:
7275
- """Create a metastore.
7276
-
7277
- Creates a new metastore based on a provided name and optional storage root path. By default (if the
7278
- __owner__ field is not set), the owner of the new metastore is the user calling the
7279
- __createMetastore__ API. If the __owner__ field is set to the empty string (**""**), the ownership is
7280
- assigned to the System User instead.
7281
-
7282
- :param name: str
7283
- The user-specified name of the metastore.
7284
- :param region: str (optional)
7285
- Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). If this field is omitted, the
7286
- region of the workspace receiving the request will be used.
7287
- :param storage_root: str (optional)
7288
- The storage root URL for metastore
7289
-
7290
- :returns: :class:`MetastoreInfo`
7291
- """
7292
- body = {}
7293
- if name is not None: body['name'] = name
7294
- if region is not None: body['region'] = region
7295
- if storage_root is not None: body['storage_root'] = storage_root
7296
- headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7297
-
7298
- res = self._api.do('POST', '/api/2.1/unity-catalog/metastores', body=body, headers=headers)
7299
- return MetastoreInfo.from_dict(res)
7300
-
7301
- def current(self) -> MetastoreAssignment:
7302
- """Get metastore assignment for workspace.
7303
-
7304
- Gets the metastore assignment for the workspace being accessed.
7305
-
7306
- :returns: :class:`MetastoreAssignment`
7307
- """
7308
-
7309
- headers = {'Accept': 'application/json', }
7310
-
7311
- res = self._api.do('GET', '/api/2.1/unity-catalog/current-metastore-assignment', headers=headers)
7312
- return MetastoreAssignment.from_dict(res)
7313
-
7314
- def delete(self, id: str, *, force: Optional[bool] = None):
7315
- """Delete a metastore.
7316
-
7317
- Deletes a metastore. The caller must be a metastore admin.
7318
-
7319
- :param id: str
7320
- Unique ID of the metastore.
7321
- :param force: bool (optional)
7322
- Force deletion even if the metastore is not empty. Default is false.
7323
-
7324
-
7325
- """
7326
-
7327
- query = {}
7328
- if force is not None: query['force'] = force
7329
- headers = {'Accept': 'application/json', }
7330
-
7331
- self._api.do('DELETE', f'/api/2.1/unity-catalog/metastores/{id}', query=query, headers=headers)
7332
-
7333
- def get(self, id: str) -> MetastoreInfo:
7334
- """Get a metastore.
7335
-
7336
- Gets a metastore that matches the supplied ID. The caller must be a metastore admin to retrieve this
7337
- info.
7338
-
7339
- :param id: str
7340
- Unique ID of the metastore.
7341
-
7342
- :returns: :class:`MetastoreInfo`
7343
- """
7344
-
7345
- headers = {'Accept': 'application/json', }
7346
-
7347
- res = self._api.do('GET', f'/api/2.1/unity-catalog/metastores/{id}', headers=headers)
7348
- return MetastoreInfo.from_dict(res)
7349
-
7350
- def list(self) -> Iterator[MetastoreInfo]:
7351
- """List metastores.
7352
-
7353
- Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin
7354
- to retrieve this info. There is no guarantee of a specific ordering of the elements in the array.
7355
-
7356
- :returns: Iterator over :class:`MetastoreInfo`
7357
- """
7358
-
7359
- headers = {'Accept': 'application/json', }
7360
-
7361
- json = self._api.do('GET', '/api/2.1/unity-catalog/metastores', headers=headers)
7362
- parsed = ListMetastoresResponse.from_dict(json).metastores
7363
- return parsed if parsed is not None else []
7364
-
7365
- def summary(self) -> GetMetastoreSummaryResponse:
7366
- """Get a metastore summary.
7367
-
7368
- Gets information about a metastore. This summary includes the storage credential, the cloud vendor,
7369
- the cloud region, and the global metastore ID.
7370
-
7371
- :returns: :class:`GetMetastoreSummaryResponse`
7372
- """
7373
-
7374
- headers = {'Accept': 'application/json', }
7375
-
7376
- res = self._api.do('GET', '/api/2.1/unity-catalog/metastore_summary', headers=headers)
7377
- return GetMetastoreSummaryResponse.from_dict(res)
7378
-
7379
- def unassign(self, workspace_id: int, metastore_id: str):
7380
- """Delete an assignment.
7381
-
7382
- Deletes a metastore assignment. The caller must be an account administrator.
7383
-
7384
- :param workspace_id: int
7385
- A workspace ID.
7386
- :param metastore_id: str
7387
- Query for the ID of the metastore to delete.
7388
-
7389
-
7390
- """
7391
-
7392
- query = {}
7393
- if metastore_id is not None: query['metastore_id'] = metastore_id
7394
- headers = {'Accept': 'application/json', }
7395
-
7396
- self._api.do('DELETE',
7397
- f'/api/2.1/unity-catalog/workspaces/{workspace_id}/metastore',
7398
- query=query,
7399
- headers=headers)
7400
-
7401
- def update(self,
7402
- id: str,
7403
- *,
7404
- delta_sharing_organization_name: Optional[str] = None,
7405
- delta_sharing_recipient_token_lifetime_in_seconds: Optional[int] = None,
7406
- delta_sharing_scope: Optional[UpdateMetastoreDeltaSharingScope] = None,
7407
- new_name: Optional[str] = None,
7408
- owner: Optional[str] = None,
7409
- privilege_model_version: Optional[str] = None,
7410
- storage_root_credential_id: Optional[str] = None) -> MetastoreInfo:
7411
- """Update a metastore.
7412
-
7413
- Updates information for a specific metastore. The caller must be a metastore admin. If the __owner__
7414
- field is set to the empty string (**""**), the ownership is updated to the System User.
7415
-
7416
- :param id: str
7417
- Unique ID of the metastore.
7418
- :param delta_sharing_organization_name: str (optional)
7419
- The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta
7420
- Sharing as the official name.
7421
- :param delta_sharing_recipient_token_lifetime_in_seconds: int (optional)
7422
- The lifetime of delta sharing recipient token in seconds.
7423
- :param delta_sharing_scope: :class:`UpdateMetastoreDeltaSharingScope` (optional)
7424
- The scope of Delta Sharing enabled for the metastore.
7425
- :param new_name: str (optional)
7426
- New name for the metastore.
7427
- :param owner: str (optional)
7428
- The owner of the metastore.
7429
- :param privilege_model_version: str (optional)
7430
- Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`).
7431
- :param storage_root_credential_id: str (optional)
7432
- UUID of storage credential to access the metastore storage_root.
7433
-
7434
- :returns: :class:`MetastoreInfo`
7238
+ :returns: :class:`MetastoreInfo`
7435
7239
  """
7436
7240
  body = {}
7437
7241
  if delta_sharing_organization_name is not None:
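
For context, a minimal usage sketch of the relocated update() call above, assuming the usual WorkspaceClient entry point; the metastore ID below is made up and the enum member is taken from UpdateMetastoreDeltaSharingScope as defined in this module:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import UpdateMetastoreDeltaSharingScope

    w = WorkspaceClient()  # picks up auth from env vars or ~/.databrickscfg

    updated = w.metastores.update(
        id="11111111-2222-3333-4444-555555555555",  # hypothetical metastore ID
        delta_sharing_scope=UpdateMetastoreDeltaSharingScope.INTERNAL_AND_EXTERNAL,
        delta_sharing_recipient_token_lifetime_in_seconds=3600,
    )
    print(updated.delta_sharing_scope)

Only the fields that are set end up in the PATCH body, matching the `if ... is not None` guards in the method.
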
@@ -7565,160 +7369,494 @@ class ModelVersionsAPI:
7565
7369
  :param alias: str
7566
7370
  The name of the alias
7567
7371
 
7568
- :returns: :class:`ModelVersionInfo`
7372
+ :returns: :class:`ModelVersionInfo`
7373
+ """
7374
+
7375
+ headers = {'Accept': 'application/json', }
7376
+
7377
+ res = self._api.do('GET',
7378
+ f'/api/2.1/unity-catalog/models/{full_name}/aliases/{alias}',
7379
+ headers=headers)
7380
+ return ModelVersionInfo.from_dict(res)
7381
+
7382
+ def list(self,
7383
+ full_name: str,
7384
+ *,
7385
+ include_browse: Optional[bool] = None,
7386
+ max_results: Optional[int] = None,
7387
+ page_token: Optional[str] = None) -> Iterator[ModelVersionInfo]:
7388
+ """List Model Versions.
7389
+
7390
+ List model versions. You can list model versions under a particular schema, or list all model versions
7391
+ in the current metastore.
7392
+
7393
+ The returned models are filtered based on the privileges of the calling user. For example, the
7394
+ metastore admin is able to list all the model versions. A regular user needs to be the owner or have
7395
+ the **EXECUTE** privilege on the parent registered model to receive the model versions in the
7396
+ response. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege
7397
+ on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.
7398
+
7399
+ There is no guarantee of a specific ordering of the elements in the response. The elements in the
7400
+ response will not contain any aliases or tags.
7401
+
7402
+ :param full_name: str
7403
+ The full three-level name of the registered model under which to list model versions
7404
+ :param include_browse: bool (optional)
7405
+ Whether to include model versions in the response for which the principal can only access selective
7406
+ metadata
7407
+ :param max_results: int (optional)
7408
+ Maximum number of model versions to return. If not set, the page length is set to a server
7409
+ configured value (100, as of 1/3/2024). - when set to a value greater than 0, the page length is the
7410
+ minimum of this value and a server configured value (1000, as of 1/3/2024); - when set to 0, the page
7411
+ length is set to a server configured value (100, as of 1/3/2024) (recommended); - when set to a
7412
+ value less than 0, an invalid parameter error is returned;
7413
+ :param page_token: str (optional)
7414
+ Opaque pagination token to go to next page based on previous query.
7415
+
7416
+ :returns: Iterator over :class:`ModelVersionInfo`
7417
+ """
7418
+
7419
+ query = {}
7420
+ if include_browse is not None: query['include_browse'] = include_browse
7421
+ if max_results is not None: query['max_results'] = max_results
7422
+ if page_token is not None: query['page_token'] = page_token
7423
+ headers = {'Accept': 'application/json', }
7424
+
7425
+ while True:
7426
+ json = self._api.do('GET',
7427
+ f'/api/2.1/unity-catalog/models/{full_name}/versions',
7428
+ query=query,
7429
+ headers=headers)
7430
+ if 'model_versions' in json:
7431
+ for v in json['model_versions']:
7432
+ yield ModelVersionInfo.from_dict(v)
7433
+ if 'next_page_token' not in json or not json['next_page_token']:
7434
+ return
7435
+ query['page_token'] = json['next_page_token']
7436
+
7437
+ def update(self, full_name: str, version: int, *, comment: Optional[str] = None) -> ModelVersionInfo:
7438
+ """Update a Model Version.
7439
+
7440
+ Updates the specified model version.
7441
+
7442
+ The caller must be a metastore admin or an owner of the parent registered model. For the latter case,
7443
+ the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the
7444
+ **USE_SCHEMA** privilege on the parent schema.
7445
+
7446
+ Currently only the comment of the model version can be updated.
7447
+
7448
+ :param full_name: str
7449
+ The three-level (fully qualified) name of the model version
7450
+ :param version: int
7451
+ The integer version number of the model version
7452
+ :param comment: str (optional)
7453
+ The comment attached to the model version
7454
+
7455
+ :returns: :class:`ModelVersionInfo`
7456
+ """
7457
+ body = {}
7458
+ if comment is not None: body['comment'] = comment
7459
+ headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7460
+
7461
+ res = self._api.do('PATCH',
7462
+ f'/api/2.1/unity-catalog/models/{full_name}/versions/{version}',
7463
+ body=body,
7464
+ headers=headers)
7465
+ return ModelVersionInfo.from_dict(res)
7466
+
7467
+
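
The list() implementation above follows next_page_token across pages, so callers can simply iterate. A sketch, with a hypothetical registered model name:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # The iterator fetches subsequent pages transparently via page_token.
    for mv in w.model_versions.list("main.ml.churn_model", max_results=100):
        print(mv.version)

    # Only the comment can be changed on a model version.
    w.model_versions.update("main.ml.churn_model", version=1, comment="validated")
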
7468
+ class OnlineTablesAPI:
7469
+ """Online tables provide lower latency and higher QPS access to data from Delta tables."""
7470
+
7471
+ def __init__(self, api_client):
7472
+ self._api = api_client
7473
+
7474
+ def create(self, *, name: Optional[str] = None, spec: Optional[OnlineTableSpec] = None) -> OnlineTable:
7475
+ """Create an Online Table.
7476
+
7477
+ Create a new Online Table.
7478
+
7479
+ :param name: str (optional)
7480
+ Full three-part (catalog, schema, table) name of the table.
7481
+ :param spec: :class:`OnlineTableSpec` (optional)
7482
+ Specification of the online table.
7483
+
7484
+ :returns: :class:`OnlineTable`
7485
+ """
7486
+ body = {}
7487
+ if name is not None: body['name'] = name
7488
+ if spec is not None: body['spec'] = spec.as_dict()
7489
+ headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7490
+
7491
+ res = self._api.do('POST', '/api/2.0/online-tables', body=body, headers=headers)
7492
+ return OnlineTable.from_dict(res)
7493
+
7494
+ def delete(self, name: str):
7495
+ """Delete an Online Table.
7496
+
7497
+ Delete an online table. Warning: This will delete all the data in the online table. If the source
7498
+ Delta table was deleted or modified since this Online Table was created, the data will be lost
7499
+ forever!
7500
+
7501
+ :param name: str
7502
+ Full three-part (catalog, schema, table) name of the table.
7503
+
7504
+
7505
+ """
7506
+
7507
+ headers = {'Accept': 'application/json', }
7508
+
7509
+ self._api.do('DELETE', f'/api/2.0/online-tables/{name}', headers=headers)
7510
+
7511
+ def get(self, name: str) -> OnlineTable:
7512
+ """Get an Online Table.
7513
+
7514
+ Get information about an existing online table and its status.
7515
+
7516
+ :param name: str
7517
+ Full three-part (catalog, schema, table) name of the table.
7518
+
7519
+ :returns: :class:`OnlineTable`
7520
+ """
7521
+
7522
+ headers = {'Accept': 'application/json', }
7523
+
7524
+ res = self._api.do('GET', f'/api/2.0/online-tables/{name}', headers=headers)
7525
+ return OnlineTable.from_dict(res)
7526
+
7527
+
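
A hedged sketch of the new OnlineTablesAPI; the spec fields (source_table_full_name, primary_key_columns, run_triggered) are assumed from the OnlineTableSpec model elsewhere in this module, and the table names are made up:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import (OnlineTableSpec,
                                                OnlineTableSpecTriggeredSchedulingPolicy)

    w = WorkspaceClient()

    spec = OnlineTableSpec(
        source_table_full_name="main.default.users",              # hypothetical source Delta table
        primary_key_columns=["user_id"],
        run_triggered=OnlineTableSpecTriggeredSchedulingPolicy(),  # refresh on demand
    )
    w.online_tables.create(name="main.default.users_online", spec=spec)

    # get() returns the table with its provisioning status; delete() drops only the online copy.
    print(w.online_tables.get("main.default.users_online").status)
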
7528
+ class QualityMonitorsAPI:
7529
+ """A monitor computes and monitors data or model quality metrics for a table over time. It generates metrics
7530
+ tables and a dashboard that you can use to monitor table health and set alerts.
7531
+
7532
+ Most write operations require the user to be the owner of the table (or its parent schema or parent
7533
+ catalog). Viewing the dashboard, computed metrics, or monitor configuration only requires the user to have
7534
+ **SELECT** privileges on the table (along with **USE_SCHEMA** and **USE_CATALOG**)."""
7535
+
7536
+ def __init__(self, api_client):
7537
+ self._api = api_client
7538
+
7539
+ def cancel_refresh(self, table_name: str, refresh_id: str):
7540
+ """Cancel refresh.
7541
+
7542
+ Cancel an active monitor refresh for the given refresh ID.
7543
+
7544
+ The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7545
+ table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7546
+ - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
7547
+ owner of the table
7548
+
7549
+ Additionally, the call must be made from the workspace where the monitor was created.
7550
+
7551
+ :param table_name: str
7552
+ Full name of the table.
7553
+ :param refresh_id: str
7554
+ ID of the refresh.
7555
+
7556
+
7557
+ """
7558
+
7559
+ headers = {}
7560
+
7561
+ self._api.do('POST',
7562
+ f'/api/2.1/unity-catalog/tables/{table_name}/monitor/refreshes/{refresh_id}/cancel',
7563
+ headers=headers)
7564
+
7565
+ def create(self,
7566
+ table_name: str,
7567
+ assets_dir: str,
7568
+ output_schema_name: str,
7569
+ *,
7570
+ baseline_table_name: Optional[str] = None,
7571
+ custom_metrics: Optional[List[MonitorMetric]] = None,
7572
+ data_classification_config: Optional[MonitorDataClassificationConfig] = None,
7573
+ inference_log: Optional[MonitorInferenceLog] = None,
7574
+ notifications: Optional[MonitorNotifications] = None,
7575
+ schedule: Optional[MonitorCronSchedule] = None,
7576
+ skip_builtin_dashboard: Optional[bool] = None,
7577
+ slicing_exprs: Optional[List[str]] = None,
7578
+ snapshot: Optional[MonitorSnapshot] = None,
7579
+ time_series: Optional[MonitorTimeSeries] = None,
7580
+ warehouse_id: Optional[str] = None) -> MonitorInfo:
7581
+ """Create a table monitor.
7582
+
7583
+ Creates a new monitor for the specified table.
7584
+
7585
+ The caller must either: 1. be an owner of the table's parent catalog, have **USE_SCHEMA** on the
7586
+ table's parent schema, and have **SELECT** access on the table 2. have **USE_CATALOG** on the table's
7587
+ parent catalog, be an owner of the table's parent schema, and have **SELECT** access on the table. 3.
7588
+ have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on
7589
+ the table's parent schema - be an owner of the table.
7590
+
7591
+ Workspace assets, such as the dashboard, will be created in the workspace where this call was made.
7592
+
7593
+ :param table_name: str
7594
+ Full name of the table.
7595
+ :param assets_dir: str
7596
+ The directory to store monitoring assets (e.g. dashboard, metric tables).
7597
+ :param output_schema_name: str
7598
+ Schema where output metric tables are created.
7599
+ :param baseline_table_name: str (optional)
7600
+ Name of the baseline table from which drift metrics are computed. Columns in the monitored
7601
+ table should also be present in the baseline table.
7602
+ :param custom_metrics: List[:class:`MonitorMetric`] (optional)
7603
+ Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics
7604
+ (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
7605
+ :param data_classification_config: :class:`MonitorDataClassificationConfig` (optional)
7606
+ The data classification config for the monitor.
7607
+ :param inference_log: :class:`MonitorInferenceLog` (optional)
7608
+ Configuration for monitoring inference logs.
7609
+ :param notifications: :class:`MonitorNotifications` (optional)
7610
+ The notification settings for the monitor.
7611
+ :param schedule: :class:`MonitorCronSchedule` (optional)
7612
+ The schedule for automatically updating and refreshing metric tables.
7613
+ :param skip_builtin_dashboard: bool (optional)
7614
+ Whether to skip creating a default dashboard summarizing data quality metrics.
7615
+ :param slicing_exprs: List[str] (optional)
7616
+ List of column expressions to slice data with for targeted analysis. The data is grouped by each
7617
+ expression independently, resulting in a separate slice for each predicate and its complements. For
7618
+ high-cardinality columns, only the top 100 unique values by frequency will generate slices.
7619
+ :param snapshot: :class:`MonitorSnapshot` (optional)
7620
+ Configuration for monitoring snapshot tables.
7621
+ :param time_series: :class:`MonitorTimeSeries` (optional)
7622
+ Configuration for monitoring time series tables.
7623
+ :param warehouse_id: str (optional)
7624
+ Optional argument to specify the warehouse for dashboard creation. If not specified, the first
7625
+ running warehouse will be used.
7626
+
7627
+ :returns: :class:`MonitorInfo`
7628
+ """
7629
+ body = {}
7630
+ if assets_dir is not None: body['assets_dir'] = assets_dir
7631
+ if baseline_table_name is not None: body['baseline_table_name'] = baseline_table_name
7632
+ if custom_metrics is not None: body['custom_metrics'] = [v.as_dict() for v in custom_metrics]
7633
+ if data_classification_config is not None:
7634
+ body['data_classification_config'] = data_classification_config.as_dict()
7635
+ if inference_log is not None: body['inference_log'] = inference_log.as_dict()
7636
+ if notifications is not None: body['notifications'] = notifications.as_dict()
7637
+ if output_schema_name is not None: body['output_schema_name'] = output_schema_name
7638
+ if schedule is not None: body['schedule'] = schedule.as_dict()
7639
+ if skip_builtin_dashboard is not None: body['skip_builtin_dashboard'] = skip_builtin_dashboard
7640
+ if slicing_exprs is not None: body['slicing_exprs'] = [v for v in slicing_exprs]
7641
+ if snapshot is not None: body['snapshot'] = snapshot.as_dict()
7642
+ if time_series is not None: body['time_series'] = time_series.as_dict()
7643
+ if warehouse_id is not None: body['warehouse_id'] = warehouse_id
7644
+ headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7645
+
7646
+ res = self._api.do('POST',
7647
+ f'/api/2.1/unity-catalog/tables/{table_name}/monitor',
7648
+ body=body,
7649
+ headers=headers)
7650
+ return MonitorInfo.from_dict(res)
7651
+
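
A sketch of creating a snapshot monitor with the create() call above; w.quality_monitors is the accessor this release is expected to expose (formerly lakehouse_monitors), and the table, directory, and schema names are hypothetical:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import MonitorSnapshot

    w = WorkspaceClient()

    info = w.quality_monitors.create(
        table_name="main.sales.orders",                                     # hypothetical table
        assets_dir="/Workspace/Users/someone@example.com/monitoring/orders",
        output_schema_name="main.sales_monitoring",
        snapshot=MonitorSnapshot(),  # profile the whole table on each refresh
    )
    print(info.status)
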
7652
+ def delete(self, table_name: str):
7653
+ """Delete a table monitor.
7654
+
7655
+ Deletes a monitor for the specified table.
7656
+
7657
+ The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7658
+ table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7659
+ - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
7660
+ owner of the table.
7661
+
7662
+ Additionally, the call must be made from the workspace where the monitor was created.
7663
+
7664
+ Note that the metric tables and dashboard will not be deleted as part of this call; those assets must
7665
+ be manually cleaned up (if desired).
7666
+
7667
+ :param table_name: str
7668
+ Full name of the table.
7669
+
7670
+
7569
7671
  """
7570
7672
 
7571
- headers = {'Accept': 'application/json', }
7673
+ headers = {}
7572
7674
 
7573
- res = self._api.do('GET',
7574
- f'/api/2.1/unity-catalog/models/{full_name}/aliases/{alias}',
7575
- headers=headers)
7576
- return ModelVersionInfo.from_dict(res)
7675
+ self._api.do('DELETE', f'/api/2.1/unity-catalog/tables/{table_name}/monitor', headers=headers)
7577
7676
 
7578
- def list(self,
7579
- full_name: str,
7580
- *,
7581
- include_browse: Optional[bool] = None,
7582
- max_results: Optional[int] = None,
7583
- page_token: Optional[str] = None) -> Iterator[ModelVersionInfo]:
7584
- """List Model Versions.
7677
+ def get(self, table_name: str) -> MonitorInfo:
7678
+ """Get a table monitor.
7585
7679
 
7586
- List model versions. You can list model versions under a particular schema, or list all model versions
7587
- in the current metastore.
7680
+ Gets a monitor for the specified table.
7588
7681
 
7589
- The returned models are filtered based on the privileges of the calling user. For example, the
7590
- metastore admin is able to list all the model versions. A regular user needs to be the owner or have
7591
- the **EXECUTE** privilege on the parent registered model to receive the model versions in the
7592
- response. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege
7593
- on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.
7682
+ The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7683
+ table's parent catalog and be an owner of the table's parent schema. 3. have the following
7684
+ permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent
7685
+ schema - **SELECT** privilege on the table.
7594
7686
 
7595
- There is no guarantee of a specific ordering of the elements in the response. The elements in the
7596
- response will not contain any aliases or tags.
7687
+ The returned information includes configuration values, as well as information on assets created by
7688
+ the monitor. Some information (e.g., dashboard) may be filtered out if the caller is in a different
7689
+ workspace than where the monitor was created.
7597
7690
 
7598
- :param full_name: str
7599
- The full three-level name of the registered model under which to list model versions
7600
- :param include_browse: bool (optional)
7601
- Whether to include model versions in the response for which the principal can only access selective
7602
- metadata for
7603
- :param max_results: int (optional)
7604
- Maximum number of model versions to return. If not set, the page length is set to a server
7605
- configured value (100, as of 1/3/2024). - when set to a value greater than 0, the page length is the
7606
- minimum of this value and a server configured value(1000, as of 1/3/2024); - when set to 0, the page
7607
- length is set to a server configured value (100, as of 1/3/2024) (recommended); - when set to a
7608
- value less than 0, an invalid parameter error is returned;
7609
- :param page_token: str (optional)
7610
- Opaque pagination token to go to next page based on previous query.
7691
+ :param table_name: str
7692
+ Full name of the table.
7611
7693
 
7612
- :returns: Iterator over :class:`ModelVersionInfo`
7694
+ :returns: :class:`MonitorInfo`
7613
7695
  """
7614
7696
 
7615
- query = {}
7616
- if include_browse is not None: query['include_browse'] = include_browse
7617
- if max_results is not None: query['max_results'] = max_results
7618
- if page_token is not None: query['page_token'] = page_token
7619
7697
  headers = {'Accept': 'application/json', }
7620
7698
 
7621
- while True:
7622
- json = self._api.do('GET',
7623
- f'/api/2.1/unity-catalog/models/{full_name}/versions',
7624
- query=query,
7625
- headers=headers)
7626
- if 'model_versions' in json:
7627
- for v in json['model_versions']:
7628
- yield ModelVersionInfo.from_dict(v)
7629
- if 'next_page_token' not in json or not json['next_page_token']:
7630
- return
7631
- query['page_token'] = json['next_page_token']
7699
+ res = self._api.do('GET', f'/api/2.1/unity-catalog/tables/{table_name}/monitor', headers=headers)
7700
+ return MonitorInfo.from_dict(res)
7632
7701
 
7633
- def update(self, full_name: str, version: int, *, comment: Optional[str] = None) -> ModelVersionInfo:
7634
- """Update a Model Version.
7702
+ def get_refresh(self, table_name: str, refresh_id: str) -> MonitorRefreshInfo:
7703
+ """Get refresh.
7635
7704
 
7636
- Updates the specified model version.
7705
+ Gets info about a specific monitor refresh using the given refresh ID.
7637
7706
 
7638
- The caller must be a metastore admin or an owner of the parent registered model. For the latter case,
7639
- the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the
7640
- **USE_SCHEMA** privilege on the parent schema.
7707
+ The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7708
+ table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7709
+ - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema -
7710
+ **SELECT** privilege on the table.
7641
7711
 
7642
- Currently only the comment of the model version can be updated.
7712
+ Additionally, the call must be made from the workspace where the monitor was created.
7643
7713
 
7644
- :param full_name: str
7645
- The three-level (fully qualified) name of the model version
7646
- :param version: int
7647
- The integer version number of the model version
7648
- :param comment: str (optional)
7649
- The comment attached to the model version
7714
+ :param table_name: str
7715
+ Full name of the table.
7716
+ :param refresh_id: str
7717
+ ID of the refresh.
7650
7718
 
7651
- :returns: :class:`ModelVersionInfo`
7719
+ :returns: :class:`MonitorRefreshInfo`
7652
7720
  """
7653
- body = {}
7654
- if comment is not None: body['comment'] = comment
7655
- headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7656
-
7657
- res = self._api.do('PATCH',
7658
- f'/api/2.1/unity-catalog/models/{full_name}/versions/{version}',
7659
- body=body,
7660
- headers=headers)
7661
- return ModelVersionInfo.from_dict(res)
7662
-
7663
7721
 
7664
- class OnlineTablesAPI:
7665
- """Online tables provide lower latency and higher QPS access to data from Delta tables."""
7722
+ headers = {'Accept': 'application/json', }
7666
7723
 
7667
- def __init__(self, api_client):
7668
- self._api = api_client
7724
+ res = self._api.do('GET',
7725
+ f'/api/2.1/unity-catalog/tables/{table_name}/monitor/refreshes/{refresh_id}',
7726
+ headers=headers)
7727
+ return MonitorRefreshInfo.from_dict(res)
7669
7728
 
7670
- def create(self, *, name: Optional[str] = None, spec: Optional[OnlineTableSpec] = None) -> OnlineTable:
7671
- """Create an Online Table.
7729
+ def list_refreshes(self, table_name: str) -> MonitorRefreshListResponse:
7730
+ """List refreshes.
7672
7731
 
7673
- Create a new Online Table.
7732
+ Gets an array containing the history of the most recent refreshes (up to 25) for this table.
7674
7733
 
7675
- :param name: str (optional)
7676
- Full three-part (catalog, schema, table) name of the table.
7677
- :param spec: :class:`OnlineTableSpec` (optional)
7678
- Specification of the online table.
7734
+ The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7735
+ table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7736
+ - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema -
7737
+ **SELECT** privilege on the table.
7679
7738
 
7680
- :returns: :class:`OnlineTable`
7739
+ Additionally, the call must be made from the workspace where the monitor was created.
7740
+
7741
+ :param table_name: str
7742
+ Full name of the table.
7743
+
7744
+ :returns: :class:`MonitorRefreshListResponse`
7681
7745
  """
7682
- body = {}
7683
- if name is not None: body['name'] = name
7684
- if spec is not None: body['spec'] = spec.as_dict()
7685
- headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7686
7746
 
7687
- res = self._api.do('POST', '/api/2.0/online-tables', body=body, headers=headers)
7688
- return OnlineTable.from_dict(res)
7747
+ headers = {'Accept': 'application/json', }
7689
7748
 
7690
- def delete(self, name: str):
7691
- """Delete an Online Table.
7749
+ res = self._api.do('GET',
7750
+ f'/api/2.1/unity-catalog/tables/{table_name}/monitor/refreshes',
7751
+ headers=headers)
7752
+ return MonitorRefreshListResponse.from_dict(res)
7753
+
7754
+ def run_refresh(self, table_name: str) -> MonitorRefreshInfo:
7755
+ """Queue a metric refresh for a monitor.
7692
7756
 
7693
- Delete an online table. Warning: This will delete all the data in the online table. If the source
7694
- Delta table was deleted or modified since this Online Table was created, this will lose the data
7695
- forever!
7757
+ Queues a metric refresh on the monitor for the specified table. The refresh will execute in the
7758
+ background.
7696
7759
 
7697
- :param name: str
7698
- Full three-part (catalog, schema, table) name of the table.
7760
+ The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7761
+ table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7762
+ - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
7763
+ owner of the table
7699
7764
 
7765
+ Additionally, the call must be made from the workspace where the monitor was created.
7766
+
7767
+ :param table_name: str
7768
+ Full name of the table.
7700
7769
 
7770
+ :returns: :class:`MonitorRefreshInfo`
7701
7771
  """
7702
7772
 
7703
7773
  headers = {'Accept': 'application/json', }
7704
7774
 
7705
- self._api.do('DELETE', f'/api/2.0/online-tables/{name}', headers=headers)
7775
+ res = self._api.do('POST',
7776
+ f'/api/2.1/unity-catalog/tables/{table_name}/monitor/refreshes',
7777
+ headers=headers)
7778
+ return MonitorRefreshInfo.from_dict(res)
7706
7779
 
7707
- def get(self, name: str) -> OnlineTable:
7708
- """Get an Online Table.
7780
+ def update(self,
7781
+ table_name: str,
7782
+ output_schema_name: str,
7783
+ *,
7784
+ baseline_table_name: Optional[str] = None,
7785
+ custom_metrics: Optional[List[MonitorMetric]] = None,
7786
+ dashboard_id: Optional[str] = None,
7787
+ data_classification_config: Optional[MonitorDataClassificationConfig] = None,
7788
+ inference_log: Optional[MonitorInferenceLog] = None,
7789
+ notifications: Optional[MonitorNotifications] = None,
7790
+ schedule: Optional[MonitorCronSchedule] = None,
7791
+ slicing_exprs: Optional[List[str]] = None,
7792
+ snapshot: Optional[MonitorSnapshot] = None,
7793
+ time_series: Optional[MonitorTimeSeries] = None) -> MonitorInfo:
7794
+ """Update a table monitor.
7709
7795
 
7710
- Get information about an existing online table and its status.
7796
+ Updates a monitor for the specified table.
7711
7797
 
7712
- :param name: str
7713
- Full three-part (catalog, schema, table) name of the table.
7798
+ The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
7799
+ table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
7800
+ - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
7801
+ owner of the table.
7714
7802
 
7715
- :returns: :class:`OnlineTable`
7803
+ Additionally, the call must be made from the workspace where the monitor was created, and the caller
7804
+ must be the original creator of the monitor.
7805
+
7806
+ Certain configuration fields, such as output asset identifiers, cannot be updated.
7807
+
7808
+ :param table_name: str
7809
+ Full name of the table.
7810
+ :param output_schema_name: str
7811
+ Schema where output metric tables are created.
7812
+ :param baseline_table_name: str (optional)
7813
+ Name of the baseline table from which drift metrics are computed. Columns in the monitored
7814
+ table should also be present in the baseline table.
7815
+ :param custom_metrics: List[:class:`MonitorMetric`] (optional)
7816
+ Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics
7817
+ (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
7818
+ :param dashboard_id: str (optional)
7819
+ ID of the dashboard that visualizes the computed metrics. This can be empty if the monitor is in PENDING
7820
+ state.
7821
+ :param data_classification_config: :class:`MonitorDataClassificationConfig` (optional)
7822
+ The data classification config for the monitor.
7823
+ :param inference_log: :class:`MonitorInferenceLog` (optional)
7824
+ Configuration for monitoring inference logs.
7825
+ :param notifications: :class:`MonitorNotifications` (optional)
7826
+ The notification settings for the monitor.
7827
+ :param schedule: :class:`MonitorCronSchedule` (optional)
7828
+ The schedule for automatically updating and refreshing metric tables.
7829
+ :param slicing_exprs: List[str] (optional)
7830
+ List of column expressions to slice data with for targeted analysis. The data is grouped by each
7831
+ expression independently, resulting in a separate slice for each predicate and its complements. For
7832
+ high-cardinality columns, only the top 100 unique values by frequency will generate slices.
7833
+ :param snapshot: :class:`MonitorSnapshot` (optional)
7834
+ Configuration for monitoring snapshot tables.
7835
+ :param time_series: :class:`MonitorTimeSeries` (optional)
7836
+ Configuration for monitoring time series tables.
7837
+
7838
+ :returns: :class:`MonitorInfo`
7716
7839
  """
7840
+ body = {}
7841
+ if baseline_table_name is not None: body['baseline_table_name'] = baseline_table_name
7842
+ if custom_metrics is not None: body['custom_metrics'] = [v.as_dict() for v in custom_metrics]
7843
+ if dashboard_id is not None: body['dashboard_id'] = dashboard_id
7844
+ if data_classification_config is not None:
7845
+ body['data_classification_config'] = data_classification_config.as_dict()
7846
+ if inference_log is not None: body['inference_log'] = inference_log.as_dict()
7847
+ if notifications is not None: body['notifications'] = notifications.as_dict()
7848
+ if output_schema_name is not None: body['output_schema_name'] = output_schema_name
7849
+ if schedule is not None: body['schedule'] = schedule.as_dict()
7850
+ if slicing_exprs is not None: body['slicing_exprs'] = [v for v in slicing_exprs]
7851
+ if snapshot is not None: body['snapshot'] = snapshot.as_dict()
7852
+ if time_series is not None: body['time_series'] = time_series.as_dict()
7853
+ headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
7717
7854
 
7718
- headers = {'Accept': 'application/json', }
7719
-
7720
- res = self._api.do('GET', f'/api/2.0/online-tables/{name}', headers=headers)
7721
- return OnlineTable.from_dict(res)
7855
+ res = self._api.do('PUT',
7856
+ f'/api/2.1/unity-catalog/tables/{table_name}/monitor',
7857
+ body=body,
7858
+ headers=headers)
7859
+ return MonitorInfo.from_dict(res)
7722
7860
 
7723
7861
 
7724
7862
  class RegisteredModelsAPI:
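
Tying the refresh endpoints above together, a sketch of queueing a refresh and inspecting its state, under the same assumed quality_monitors accessor and a made-up table name:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    table = "main.sales.orders"  # hypothetical monitored table

    refresh = w.quality_monitors.run_refresh(table)  # queued; runs in the background
    latest = w.quality_monitors.get_refresh(table, str(refresh.refresh_id))
    print(latest.state)

    # History of up to 25 recent refreshes; 'refreshes' field assumed from MonitorRefreshListResponse.
    for r in (w.quality_monitors.list_refreshes(table).refreshes or []):
        print(r.refresh_id, r.state)
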
@@ -8203,7 +8341,7 @@ class StorageCredentialsAPI:
8203
8341
  :param comment: str (optional)
8204
8342
  Comment associated with the credential.
8205
8343
  :param databricks_gcp_service_account: :class:`DatabricksGcpServiceAccountRequest` (optional)
8206
- The <Databricks> managed GCP service account configuration.
8344
+ The Databricks managed GCP service account configuration.
8207
8345
  :param read_only: bool (optional)
8208
8346
  Whether the storage credential is only usable for read operations.
8209
8347
  :param skip_validation: bool (optional)
@@ -8319,6 +8457,7 @@ class StorageCredentialsAPI:
8319
8457
  comment: Optional[str] = None,
8320
8458
  databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountRequest] = None,
8321
8459
  force: Optional[bool] = None,
8460
+ isolation_mode: Optional[IsolationMode] = None,
8322
8461
  new_name: Optional[str] = None,
8323
8462
  owner: Optional[str] = None,
8324
8463
  read_only: Optional[bool] = None,
@@ -8340,9 +8479,11 @@ class StorageCredentialsAPI:
8340
8479
  :param comment: str (optional)
8341
8480
  Comment associated with the credential.
8342
8481
  :param databricks_gcp_service_account: :class:`DatabricksGcpServiceAccountRequest` (optional)
8343
- The <Databricks> managed GCP service account configuration.
8482
+ The Databricks managed GCP service account configuration.
8344
8483
  :param force: bool (optional)
8345
8484
  Force update even if there are dependent external locations or external tables.
8485
+ :param isolation_mode: :class:`IsolationMode` (optional)
8486
+ Whether the current securable is accessible from all workspaces or a specific set of workspaces.
8346
8487
  :param new_name: str (optional)
8347
8488
  New name for the storage credential.
8348
8489
  :param owner: str (optional)
@@ -8365,6 +8506,7 @@ class StorageCredentialsAPI:
8365
8506
  if databricks_gcp_service_account is not None:
8366
8507
  body['databricks_gcp_service_account'] = databricks_gcp_service_account.as_dict()
8367
8508
  if force is not None: body['force'] = force
8509
+ if isolation_mode is not None: body['isolation_mode'] = isolation_mode.value
8368
8510
  if new_name is not None: body['new_name'] = new_name
8369
8511
  if owner is not None: body['owner'] = owner
8370
8512
  if read_only is not None: body['read_only'] = read_only
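
A sketch of the new isolation_mode argument on storage credential updates; the credential name is hypothetical and ISOLATION_MODE_ISOLATED is an assumed member of the IsolationMode enum introduced in this release:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import IsolationMode

    w = WorkspaceClient()

    cred = w.storage_credentials.update(
        name="my_gcs_credential",                              # hypothetical credential name
        isolation_mode=IsolationMode.ISOLATION_MODE_ISOLATED,  # assumed enum member name
        comment="restricted to selected workspaces",
    )
    print(cred.isolation_mode)  # isolation_mode field on the response assumed to be populated
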
@@ -8450,7 +8592,7 @@ class SystemSchemasAPI:
8450
8592
  def __init__(self, api_client):
8451
8593
  self._api = api_client
8452
8594
 
8453
- def disable(self, metastore_id: str, schema_name: DisableSchemaName):
8595
+ def disable(self, metastore_id: str, schema_name: str):
8454
8596
  """Disable a system schema.
8455
8597
 
8456
8598
  Disables the system schema and removes it from the system catalog. The caller must be an account admin
@@ -8458,7 +8600,7 @@ class SystemSchemasAPI:
8458
8600
 
8459
8601
  :param metastore_id: str
8460
8602
  The metastore ID under which the system schema lives.
8461
- :param schema_name: :class:`DisableSchemaName`
8603
+ :param schema_name: str
8462
8604
  Full name of the system schema.
8463
8605
 
8464
8606
 
@@ -8467,10 +8609,10 @@ class SystemSchemasAPI:
8467
8609
  headers = {'Accept': 'application/json', }
8468
8610
 
8469
8611
  self._api.do('DELETE',
8470
- f'/api/2.1/unity-catalog/metastores/{metastore_id}/systemschemas/{schema_name.value}',
8612
+ f'/api/2.1/unity-catalog/metastores/{metastore_id}/systemschemas/{schema_name}',
8471
8613
  headers=headers)
8472
8614
 
8473
- def enable(self, metastore_id: str, schema_name: EnableSchemaName):
8615
+ def enable(self, metastore_id: str, schema_name: str):
8474
8616
  """Enable a system schema.
8475
8617
 
8476
8618
  Enables the system schema and adds it to the system catalog. The caller must be an account admin or a
@@ -8478,7 +8620,7 @@ class SystemSchemasAPI:
8478
8620
 
8479
8621
  :param metastore_id: str
8480
8622
  The metastore ID under which the system schema lives.
8481
- :param schema_name: :class:`EnableSchemaName`
8623
+ :param schema_name: str
8482
8624
  Full name of the system schema.
8483
8625
 
8484
8626
 
@@ -8487,7 +8629,7 @@ class SystemSchemasAPI:
8487
8629
  headers = {'Accept': 'application/json', }
8488
8630
 
8489
8631
  self._api.do('PUT',
8490
- f'/api/2.1/unity-catalog/metastores/{metastore_id}/systemschemas/{schema_name.value}',
8632
+ f'/api/2.1/unity-catalog/metastores/{metastore_id}/systemschemas/{schema_name}',
8491
8633
  headers=headers)
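
Since schema_name is now a plain string rather than an enum, enabling a system schema can pass the name directly. A sketch with a made-up metastore ID:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    metastore_id = "11111111-2222-3333-4444-555555555555"  # hypothetical

    # Any schema name accepted by the backend can be passed as-is now.
    w.system_schemas.enable(metastore_id=metastore_id, schema_name="access")

    # 'schema' and 'state' fields assumed from SystemSchemaInfo.
    for s in w.system_schemas.list(metastore_id=metastore_id):
        print(s.schema, s.state)
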
8492
8634
 
8493
8635
  def list(self, metastore_id: str) -> Iterator[SystemSchemaInfo]: