databricks-sdk 0.45.0__py3-none-any.whl → 0.46.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- databricks/sdk/__init__.py +13 -2
- databricks/sdk/config.py +4 -0
- databricks/sdk/credentials_provider.py +6 -1
- databricks/sdk/data_plane.py +1 -59
- databricks/sdk/oauth.py +7 -2
- databricks/sdk/service/compute.py +38 -21
- databricks/sdk/service/dashboards.py +34 -17
- databricks/sdk/service/files.py +4 -0
- databricks/sdk/service/ml.py +476 -2
- databricks/sdk/service/oauth2.py +0 -12
- databricks/sdk/service/serving.py +29 -21
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.46.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.46.0.dist-info}/RECORD +18 -18
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.46.0.dist-info}/WHEEL +1 -1
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.46.0.dist-info}/LICENSE +0 -0
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.46.0.dist-info}/NOTICE +0 -0
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.46.0.dist-info}/top_level.txt +0 -0
databricks/sdk/__init__.py
CHANGED
@@ -8,6 +8,7 @@ import databricks.sdk.dbutils as dbutils
 import databricks.sdk.service as service
 from databricks.sdk import azure
 from databricks.sdk.credentials_provider import CredentialsStrategy
+from databricks.sdk.data_plane import DataPlaneTokenSource
 from databricks.sdk.mixins.compute import ClustersExt
 from databricks.sdk.mixins.files import DbfsExt, FilesExt
 from databricks.sdk.mixins.jobs import JobsExt
@@ -63,7 +64,8 @@ from databricks.sdk.service.marketplace import (
     ProviderExchangeFiltersAPI, ProviderExchangesAPI, ProviderFilesAPI,
     ProviderListingsAPI, ProviderPersonalizationRequestsAPI,
     ProviderProviderAnalyticsDashboardsAPI, ProviderProvidersAPI)
-from databricks.sdk.service.ml import ExperimentsAPI, ModelRegistryAPI
+from databricks.sdk.service.ml import (ExperimentsAPI, ForecastingAPI,
+                                       ModelRegistryAPI)
 from databricks.sdk.service.oauth2 import (AccountFederationPolicyAPI,
                                            CustomAppIntegrationAPI,
                                            OAuthPublishedAppsAPI,
@@ -284,8 +286,11 @@ class WorkspaceClient:
         self._secrets = service.workspace.SecretsAPI(self._api_client)
         self._service_principals = service.iam.ServicePrincipalsAPI(self._api_client)
         self._serving_endpoints = serving_endpoints
+        serving_endpoints_data_plane_token_source = DataPlaneTokenSource(
+            self._config.host, self._config.oauth_token, not self._config.enable_experimental_async_token_refresh
+        )
         self._serving_endpoints_data_plane = service.serving.ServingEndpointsDataPlaneAPI(
-            self._api_client, serving_endpoints
+            self._api_client, serving_endpoints, serving_endpoints_data_plane_token_source
         )
         self._settings = service.settings.SettingsAPI(self._api_client)
         self._shares = service.sharing.SharesAPI(self._api_client)
@@ -305,6 +310,7 @@ class WorkspaceClient:
         self._workspace = WorkspaceExt(self._api_client)
         self._workspace_bindings = service.catalog.WorkspaceBindingsAPI(self._api_client)
         self._workspace_conf = service.settings.WorkspaceConfAPI(self._api_client)
+        self._forecasting = service.ml.ForecastingAPI(self._api_client)

     @property
     def config(self) -> client.Config:
@@ -808,6 +814,11 @@ class WorkspaceClient:
         """This API allows updating known workspace settings for advanced users."""
         return self._workspace_conf

+    @property
+    def forecasting(self) -> service.ml.ForecastingAPI:
+        """The Forecasting API allows you to create and get serverless forecasting experiments."""
+        return self._forecasting
+
     def get_workspace_id(self) -> int:
         """Get the workspace ID of the workspace that this client is connected to."""
         response = self._api_client.do("GET", "/api/2.0/preview/scim/v2/Me", response_headers=["X-Databricks-Org-Id"])
databricks/sdk/config.py
CHANGED
@@ -95,6 +95,10 @@ class Config:
     max_connections_per_pool: int = ConfigAttribute()
     databricks_environment: Optional[DatabricksEnvironment] = None

+    enable_experimental_async_token_refresh: bool = ConfigAttribute(
+        env="DATABRICKS_ENABLE_EXPERIMENTAL_ASYNC_TOKEN_REFRESH"
+    )
+
     enable_experimental_files_api_client: bool = ConfigAttribute(env="DATABRICKS_ENABLE_EXPERIMENTAL_FILES_API_CLIENT")
     files_api_client_download_max_total_recovers = None
     files_api_client_download_max_total_recovers_without_progressing = 1
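The new flag follows the same `ConfigAttribute` pattern as the existing experimental files-API switch, so it can be set through the environment or explicitly on `Config`. A sketch of both opt-in paths:

import os
from databricks.sdk import WorkspaceClient
from databricks.sdk.core import Config

# Option 1: opt in via the environment variable that ConfigAttribute reads.
os.environ["DATABRICKS_ENABLE_EXPERIMENTAL_ASYNC_TOKEN_REFRESH"] = "true"

# Option 2: opt in explicitly on the Config object.
cfg = Config(enable_experimental_async_token_refresh=True)
w = WorkspaceClient(config=cfg)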
databricks/sdk/credentials_provider.py
CHANGED
@@ -191,6 +191,7 @@ def oauth_service_principal(cfg: "Config") -> Optional[CredentialsProvider]:
         token_url=oidc.token_endpoint,
         scopes=["all-apis"],
         use_header=True,
+        disable_async=not cfg.enable_experimental_async_token_refresh,
     )

     def inner() -> Dict[str, str]:
@@ -290,6 +291,7 @@ def azure_service_principal(cfg: "Config") -> CredentialsProvider:
             token_url=f"{aad_endpoint}{cfg.azure_tenant_id}/oauth2/token",
             endpoint_params={"resource": resource},
             use_params=True,
+            disable_async=not cfg.enable_experimental_async_token_refresh,
         )

     _ensure_host_present(cfg, token_source_for)
@@ -355,6 +357,7 @@ def github_oidc_azure(cfg: "Config") -> Optional[CredentialsProvider]:
         token_url=f"{aad_endpoint}{cfg.azure_tenant_id}/oauth2/token",
         endpoint_params=params,
         use_params=True,
+        disable_async=not cfg.enable_experimental_async_token_refresh,
     )

     def refreshed_headers() -> Dict[str, str]:
@@ -458,8 +461,9 @@ class CliTokenSource(Refreshable):
         token_type_field: str,
         access_token_field: str,
         expiry_field: str,
+        disable_async: bool = True,
     ):
-        super().__init__()
+        super().__init__(disable_async=disable_async)
         self._cmd = cmd
         self._token_type_field = token_type_field
         self._access_token_field = access_token_field
@@ -690,6 +694,7 @@ class DatabricksCliTokenSource(CliTokenSource):
             token_type_field="token_type",
             access_token_field="access_token",
             expiry_field="expiry",
+            disable_async=not cfg.enable_experimental_async_token_refresh,
         )

     @staticmethod
databricks/sdk/data_plane.py
CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations

 import threading
 from dataclasses import dataclass
-from typing import Callable, List, Optional
+from typing import Callable, Optional
 from urllib import parse

 from databricks.sdk import oauth
@@ -88,61 +88,3 @@ class DataPlaneDetails:
     """URL used to query the endpoint through the DataPlane."""
     token: Token
     """Token to query the DataPlane endpoint."""
-
-
-## Old implementation. #TODO: Remove after the new implementation is used
-
-
-class DataPlaneService:
-    """Helper class to fetch and manage DataPlane details."""
-
-    from .service.serving import DataPlaneInfo
-
-    def __init__(self):
-        self._data_plane_info = {}
-        self._tokens = {}
-        self._lock = threading.Lock()
-
-    def get_data_plane_details(
-        self,
-        method: str,
-        params: List[str],
-        info_getter: Callable[[], DataPlaneInfo],
-        refresh: Callable[[str], Token],
-    ):
-        """Get and cache information required to query a Data Plane endpoint using the provided methods.
-
-        Returns a cached DataPlaneDetails if the details have already been fetched previously and are still valid.
-        If not, it uses the provided functions to fetch the details.
-
-        :param method: method name. Used to construct a unique key for the cache.
-        :param params: path params used in the "get" operation which uniquely determine the object. Used to construct a unique key for the cache.
-        :param info_getter: function which returns the DataPlaneInfo. It will only be called if the information is not already present in the cache.
-        :param refresh: function to refresh the token. It will only be called if the token is missing or expired.
-        """
-        all_elements = params.copy()
-        all_elements.insert(0, method)
-        map_key = "/".join(all_elements)
-        info = self._data_plane_info.get(map_key)
-        if not info:
-            self._lock.acquire()
-            try:
-                info = self._data_plane_info.get(map_key)
-                if not info:
-                    info = info_getter()
-                    self._data_plane_info[map_key] = info
-            finally:
-                self._lock.release()
-
-        token = self._tokens.get(map_key)
-        if not token or not token.valid:
-            self._lock.acquire()
-            token = self._tokens.get(map_key)
-            try:
-                if not token or not token.valid:
-                    token = refresh(info.authorization_details)
-                    self._tokens[map_key] = token
-            finally:
-                self._lock.release()
-
-        return DataPlaneDetails(endpoint_url=info.endpoint_url, token=token)
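With `DataPlaneService` deleted, per-endpoint token handling lives in `DataPlaneTokenSource`, which `WorkspaceClient` constructs from the host, an OAuth-token callable, and the async-refresh flag (see the `__init__.py` hunk above). A hedged sketch of the replacement call shape; `cfg`, `endpoint_url`, and `authorization_details` are placeholders, and the argument names follow the call sites in this diff rather than a documented signature:

from databricks.sdk.data_plane import DataPlaneTokenSource

# Constructed the way WorkspaceClient does it: host, a zero-arg callable
# returning the workspace OAuth token, and the disable-async flag.
token_source = DataPlaneTokenSource(cfg.host, cfg.oauth_token, True)

# serving.py then exchanges (endpoint_url, authorization_details) for a
# scoped data-plane token, caching per endpoint.
token = token_source.token(endpoint_url, authorization_details)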
databricks/sdk/oauth.py
CHANGED
@@ -426,12 +426,16 @@ class SessionCredentials(Refreshable):
         client_id: str,
         client_secret: str = None,
         redirect_url: str = None,
+        disable_async: bool = True,
     ):
         self._token_endpoint = token_endpoint
         self._client_id = client_id
         self._client_secret = client_secret
         self._redirect_url = redirect_url
-        super().__init__(token)
+        super().__init__(
+            token=token,
+            disable_async=disable_async,
+        )

     def as_dict(self) -> dict:
         return {"token": self.token().as_dict()}
@@ -708,9 +712,10 @@ class ClientCredentials(Refreshable):
     scopes: List[str] = None
     use_params: bool = False
     use_header: bool = False
+    disable_async: bool = True

     def __post_init__(self):
-        super().__init__()
+        super().__init__(disable_async=self.disable_async)

     def refresh(self) -> Token:
         params = {"grant_type": "client_credentials"}
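Every `Refreshable` subclass now forwards a `disable_async` flag to the base class, defaulting to `True` so synchronous refresh-on-expiry stays the behavior unless the experimental config flag flips it. A sketch of direct use with placeholder credentials and token URL:

from databricks.sdk.oauth import ClientCredentials

# disable_async=False opts this token source into background refresh;
# the default (True) keeps the old blocking behavior.
creds = ClientCredentials(
    client_id="my-client-id",          # placeholder
    client_secret="my-client-secret",  # placeholder
    token_url="https://example.com/oidc/v1/token",  # placeholder
    scopes=["all-apis"],
    use_header=True,
    disable_async=False,
)
token = creds.token()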
databricks/sdk/service/compute.py
CHANGED
@@ -3846,6 +3846,10 @@ class DestroyResponse:

 @dataclass
 class DiskSpec:
+    """Describes the disks that are launched for each instance in the spark cluster. For example, if
+    the cluster has 3 instances, each instance is configured to launch 2 disks, 100 GiB each, then
+    Databricks will launch a total of 6 disks, 100 GiB each, for this cluster."""
+
     disk_count: Optional[int] = None
     """The number of disks launched for each instance: - This feature is only enabled for supported
     node types. - Users can choose up to the limit of the disks supported by the node type. - For
@@ -3920,9 +3924,15 @@ class DiskSpec:

 @dataclass
 class DiskType:
+    """Describes the disk type."""
+
     azure_disk_volume_type: Optional[DiskTypeAzureDiskVolumeType] = None
+    """All Azure Disk types that Databricks supports. See
+    https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""

     ebs_volume_type: Optional[DiskTypeEbsVolumeType] = None
+    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
+    details."""

     def as_dict(self) -> dict:
         """Serializes the DiskType into a dictionary suitable for use as a JSON request body."""
@@ -3952,12 +3962,16 @@ class DiskType:


 class DiskTypeAzureDiskVolumeType(Enum):
+    """All Azure Disk types that Databricks supports. See
+    https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""

     PREMIUM_LRS = "PREMIUM_LRS"
     STANDARD_LRS = "STANDARD_LRS"


 class DiskTypeEbsVolumeType(Enum):
+    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
+    details."""

     GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
     THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
@@ -3998,6 +4012,7 @@ class DockerBasicAuth:
 @dataclass
 class DockerImage:
     basic_auth: Optional[DockerBasicAuth] = None
+    """Basic auth with username and password"""

     url: Optional[str] = None
     """URL of the docker image."""
@@ -5334,7 +5349,7 @@ class GetInstancePool:
     - Currently, Databricks allows at most 45 custom tags"""

     default_tags: Optional[Dict[str, str]] = None
-    """Tags that are added by Databricks regardless of any
+    """Tags that are added by Databricks regardless of any ``custom_tags``, including:

     - Vendor: Databricks

@@ -6250,7 +6265,7 @@ class InstancePoolAndStats:
     - Currently, Databricks allows at most 45 custom tags"""

     default_tags: Optional[Dict[str, str]] = None
-    """Tags that are added by Databricks regardless of any
+    """Tags that are added by Databricks regardless of any ``custom_tags``, including:

     - Vendor: Databricks

@@ -6427,10 +6442,10 @@ class InstancePoolAndStats:

 @dataclass
 class InstancePoolAwsAttributes:
+    """Attributes set during instance pool creation which are related to Amazon Web Services."""
+
     availability: Optional[InstancePoolAwsAttributesAvailability] = None
-    """Availability type used for the spot nodes.
-
-    The default value is defined by InstancePoolConf.instancePoolDefaultAwsAvailability"""
+    """Availability type used for the spot nodes."""

     spot_bid_price_percent: Optional[int] = None
     """Calculates the bid price for AWS spot instances, as a percentage of the corresponding instance
@@ -6439,10 +6454,7 @@ class InstancePoolAwsAttributes:
     instances. Similarly, if this field is set to 200, the bid price is twice the price of on-demand
     `r3.xlarge` instances. If not specified, the default value is 100. When spot instances are
     requested for this cluster, only spot instances whose bid price percentage matches this field
-    will be considered. Note that, for safety, we enforce this field to be no more than 10000.
-
-    The default value and documentation here should be kept consistent with
-    CommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent."""
+    will be considered. Note that, for safety, we enforce this field to be no more than 10000."""

     zone_id: Optional[str] = None
     """Identifier for the availability zone/datacenter in which the cluster resides. This string will
@@ -6485,9 +6497,7 @@ class InstancePoolAwsAttributes:


 class InstancePoolAwsAttributesAvailability(Enum):
-    """
-
-    The default value is defined by InstancePoolConf.instancePoolDefaultAwsAvailability"""
+    """The set of AWS availability types supported when setting up nodes for a cluster."""

     ON_DEMAND = "ON_DEMAND"
     SPOT = "SPOT"
@@ -6495,14 +6505,16 @@ class InstancePoolAwsAttributesAvailability(Enum):

 @dataclass
 class InstancePoolAzureAttributes:
+    """Attributes set during instance pool creation which are related to Azure."""
+
     availability: Optional[InstancePoolAzureAttributesAvailability] = None
-    """
-
-    The default value is defined by InstancePoolConf.instancePoolDefaultAzureAvailability"""
+    """Availability type used for the spot nodes."""

     spot_bid_max_price: Optional[float] = None
-    """
-
+    """With variable pricing, you have option to set a max price, in US dollars (USD) For example, the
+    value 2 would be a max price of $2.00 USD per hour. If you set the max price to be -1, the VM
+    won't be evicted based on price. The price for the VM will be the current price for spot or the
+    price for a standard VM, which ever is less, as long as there is capacity and quota available."""

     def as_dict(self) -> dict:
         """Serializes the InstancePoolAzureAttributes into a dictionary suitable for use as a JSON request body."""
@@ -6532,9 +6544,7 @@ class InstancePoolAzureAttributes:


 class InstancePoolAzureAttributesAvailability(Enum):
-    """
-
-    The default value is defined by InstancePoolConf.instancePoolDefaultAzureAvailability"""
+    """The set of Azure availability types supported when setting up nodes for a cluster."""

     ON_DEMAND_AZURE = "ON_DEMAND_AZURE"
     SPOT_AZURE = "SPOT_AZURE"
@@ -6542,6 +6552,8 @@ class InstancePoolAzureAttributesAvailability(Enum):

 @dataclass
 class InstancePoolGcpAttributes:
+    """Attributes set during instance pool creation which are related to GCP."""
+
     gcp_availability: Optional[GcpAvailability] = None
     """This field determines whether the instance pool will contain preemptible VMs, on-demand VMs, or
     preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""
@@ -6756,7 +6768,10 @@ class InstancePoolPermissionsRequest:


 class InstancePoolState(Enum):
-    """
+    """The state of a Cluster. The current allowable state transitions are as follows:
+
+    - ``ACTIVE`` -> ``STOPPED`` - ``ACTIVE`` -> ``DELETED`` - ``STOPPED`` -> ``ACTIVE`` -
+    ``STOPPED`` -> ``DELETED``"""

     ACTIVE = "ACTIVE"
     DELETED = "DELETED"
@@ -7865,6 +7880,8 @@ class NodeType:

 @dataclass
 class PendingInstanceError:
+    """Error message of a failed pending instances"""
+
     instance_id: Optional[str] = None

     message: Optional[str] = None
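The compute changes are docstring-only, but the restored `DiskSpec` docstring reads easiest against a concrete value. A sketch matching its 3-instances-times-2-disks example:

from databricks.sdk.service.compute import DiskSpec, DiskType, DiskTypeEbsVolumeType

# Two 100 GiB general-purpose SSDs per instance; on a 3-instance cluster this
# launches 6 disks in total, as the DiskSpec docstring describes.
spec = DiskSpec(
    disk_count=2,
    disk_size=100,
    disk_type=DiskType(ebs_volume_type=DiskTypeEbsVolumeType.GENERAL_PURPOSE_SSD),
)
print(spec.as_dict())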
databricks/sdk/service/dashboards.py
CHANGED
@@ -594,12 +594,15 @@ class GenieMessage:
     `ASKING_AI`: Waiting for the LLM to respond to the user's question. * `PENDING_WAREHOUSE`:
     Waiting for warehouse before the SQL query can start executing. * `EXECUTING_QUERY`: Executing a
     generated SQL query. Get the SQL query result by calling
-    [
-    generation or query execution failed. See `error` field. * `COMPLETED`:
-    completed. Results are in the `attachments` field. Get the SQL query
-
-
+    [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) API. *
+    `FAILED`: The response generation or query execution failed. See `error` field. * `COMPLETED`:
+    Message processing is completed. Results are in the `attachments` field. Get the SQL query
+    result by calling
+    [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) API. *
+    `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL result is not available
+    anymore. The user needs to rerun the query. Rerun the SQL query result by calling
+    [executeMessageAttachmentQuery](:method:genie/executeMessageAttachmentQuery) API. * `CANCELLED`:
+    Message has been cancelled."""

     user_id: Optional[int] = None
     """ID of the user who created the message"""
@@ -697,6 +700,10 @@ class GenieQueryAttachment:
     query_result_metadata: Optional[GenieResultMetadata] = None
     """Metadata associated with the query result."""

+    statement_id: Optional[str] = None
+    """Statement Execution API statement id. Use [Get status, manifest, and result first
+    chunk](:method:statementexecution/getstatement) to get the full result data."""
+
     title: Optional[str] = None
     """Name of the query"""

@@ -713,6 +720,8 @@ class GenieQueryAttachment:
             body["query"] = self.query
         if self.query_result_metadata:
             body["query_result_metadata"] = self.query_result_metadata.as_dict()
+        if self.statement_id is not None:
+            body["statement_id"] = self.statement_id
         if self.title is not None:
             body["title"] = self.title
         return body
@@ -730,6 +739,8 @@ class GenieQueryAttachment:
             body["query"] = self.query
         if self.query_result_metadata:
             body["query_result_metadata"] = self.query_result_metadata
+        if self.statement_id is not None:
+            body["statement_id"] = self.statement_id
         if self.title is not None:
             body["title"] = self.title
         return body
@@ -743,6 +754,7 @@ class GenieQueryAttachment:
             last_updated_timestamp=d.get("last_updated_timestamp", None),
             query=d.get("query", None),
             query_result_metadata=_from_dict(d, "query_result_metadata", GenieResultMetadata),
+            statement_id=d.get("statement_id", None),
             title=d.get("title", None),
         )

@@ -1062,6 +1074,7 @@ class MessageErrorType(Enum):
     CHAT_COMPLETION_NETWORK_EXCEPTION = "CHAT_COMPLETION_NETWORK_EXCEPTION"
     CONTENT_FILTER_EXCEPTION = "CONTENT_FILTER_EXCEPTION"
     CONTEXT_EXCEEDED_EXCEPTION = "CONTEXT_EXCEEDED_EXCEPTION"
+    COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION = "COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION"
     COULD_NOT_GET_UC_SCHEMA_EXCEPTION = "COULD_NOT_GET_UC_SCHEMA_EXCEPTION"
     DEPLOYMENT_NOT_FOUND_EXCEPTION = "DEPLOYMENT_NOT_FOUND_EXCEPTION"
     FUNCTIONS_NOT_AVAILABLE_EXCEPTION = "FUNCTIONS_NOT_AVAILABLE_EXCEPTION"
@@ -1107,12 +1120,15 @@ class MessageStatus(Enum):
     `ASKING_AI`: Waiting for the LLM to respond to the user's question. * `PENDING_WAREHOUSE`:
     Waiting for warehouse before the SQL query can start executing. * `EXECUTING_QUERY`: Executing a
     generated SQL query. Get the SQL query result by calling
-    [
-    generation or query execution failed. See `error` field. * `COMPLETED`:
-    completed. Results are in the `attachments` field. Get the SQL query
-
-
+    [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) API. *
+    `FAILED`: The response generation or query execution failed. See `error` field. * `COMPLETED`:
+    Message processing is completed. Results are in the `attachments` field. Get the SQL query
+    result by calling
+    [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) API. *
+    `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL result is not available
+    anymore. The user needs to rerun the query. Rerun the SQL query result by calling
+    [executeMessageAttachmentQuery](:method:genie/executeMessageAttachmentQuery) API. * `CANCELLED`:
+    Message has been cancelled."""

     ASKING_AI = "ASKING_AI"
     CANCELLED = "CANCELLED"
@@ -1917,7 +1933,8 @@ class GenieAPI:
     ) -> GenieGetMessageQueryResultResponse:
         """Execute message attachment SQL query.

-        Execute the SQL for a message query attachment.
+        Execute the SQL for a message query attachment. Use this API when the query attachment has expired and
+        needs to be re-executed.

         :param space_id: str
           Genie space ID
@@ -1945,7 +1962,7 @@ class GenieAPI:
     def execute_message_query(
         self, space_id: str, conversation_id: str, message_id: str
     ) -> GenieGetMessageQueryResultResponse:
-        """Execute SQL query in a conversation message.
+        """[Deprecated] Execute SQL query in a conversation message.

         Execute the SQL query in the message.

@@ -2059,7 +2076,7 @@ class GenieAPI:
     def get_message_query_result_by_attachment(
         self, space_id: str, conversation_id: str, message_id: str, attachment_id: str
     ) -> GenieGetMessageQueryResultResponse:
-        """[
+        """[Deprecated] Get conversation message SQL query result.

         Get the result of SQL query if the message has a query attachment. This is only available if a message
         has a query attachment and the message status is `EXECUTING_QUERY` OR `COMPLETED`.
@@ -2088,9 +2105,9 @@ class GenieAPI:
         return GenieGetMessageQueryResultResponse.from_dict(res)

     def get_space(self, space_id: str) -> GenieSpace:
-        """Get
+        """Get Genie Space.

-        Get a Genie Space.
+        Get details of a Genie Space.

         :param space_id: str
           The ID associated with the Genie space
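The new `statement_id` on `GenieQueryAttachment` links a Genie answer to the Statement Execution API, which is where the full result data lives. A hedged sketch of following that link; the space, conversation, and message IDs are placeholders:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

message = w.genie.get_message(space_id, conversation_id, message_id)  # IDs are placeholders
for attachment in message.attachments or []:
    if attachment.query and attachment.query.statement_id:
        # Fetch status, manifest, and the first result chunk, as the
        # statement_id docstring suggests.
        result = w.statement_execution.get_statement(attachment.query.statement_id)
        print(result.status)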
databricks/sdk/service/files.py
CHANGED
@@ -314,12 +314,14 @@ class DirectoryEntry:
 @dataclass
 class DownloadResponse:
     content_length: Optional[int] = None
+    """The length of the HTTP response body in bytes."""

     content_type: Optional[str] = None

     contents: Optional[BinaryIO] = None

     last_modified: Optional[str] = None
+    """The last modified time of the file in HTTP-date (RFC 7231) format."""

     def as_dict(self) -> dict:
         """Serializes the DownloadResponse into a dictionary suitable for use as a JSON request body."""
@@ -430,10 +432,12 @@ class GetDirectoryMetadataResponse:
 @dataclass
 class GetMetadataResponse:
     content_length: Optional[int] = None
+    """The length of the HTTP response body in bytes."""

     content_type: Optional[str] = None

     last_modified: Optional[str] = None
+    """The last modified time of the file in HTTP-date (RFC 7231) format."""

     def as_dict(self) -> dict:
         """Serializes the GetMetadataResponse into a dictionary suitable for use as a JSON request body."""
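Both new docstrings describe fields populated from HTTP response headers (`Content-Length`, `Last-Modified`). A small sketch of reading them through the Files API, with a placeholder volume path:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# HEAD-style metadata call; both documented fields come from response headers.
meta = w.files.get_metadata("/Volumes/main/default/my_volume/data.csv")  # placeholder path
print(meta.content_length)  # body length in bytes
print(meta.last_modified)   # HTTP-date (RFC 7231), e.g. "Wed, 21 Oct 2015 07:28:00 GMT"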
databricks/sdk/service/ml.py
CHANGED
@@ -3,11 +3,15 @@
 from __future__ import annotations

 import logging
+import random
+import time
 from dataclasses import dataclass
+from datetime import timedelta
 from enum import Enum
-from typing import Any, Dict, Iterator, List, Optional
+from typing import Any, Callable, Dict, Iterator, List, Optional

-from ._internal import _enum, _from_dict, _repeated_dict, _repeated_enum
+from ..errors import OperationFailed
+from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum

 _LOG = logging.getLogger("databricks.sdk")

@@ -482,6 +486,197 @@ class CreateExperimentResponse:
         return cls(experiment_id=d.get("experiment_id", None))


+@dataclass
+class CreateForecastingExperimentRequest:
+    train_data_path: str
+    """The three-level (fully qualified) name of a unity catalog table. This table serves as the
+    training data for the forecasting model."""
+
+    target_column: str
+    """Name of the column in the input training table that serves as the prediction target. The values
+    in this column will be used as the ground truth for model training."""
+
+    time_column: str
+    """Name of the column in the input training table that represents the timestamp of each row."""
+
+    data_granularity_unit: str
+    """The time unit of the input data granularity. Together with data_granularity_quantity field, this
+    defines the time interval between consecutive rows in the time series data. Possible values: *
+    'W' (weeks) * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' /
+    'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / 'month' / 'months' * 'Q' /
+    'quarter' / 'quarters' * 'Y' / 'year' / 'years'"""
+
+    forecast_horizon: int
+    """The number of time steps into the future for which predictions should be made. This value
+    represents a multiple of data_granularity_unit and data_granularity_quantity determining how far
+    ahead the model will forecast."""
+
+    custom_weights_column: Optional[str] = None
+    """Name of the column in the input training table used to customize the weight for each time series
+    to calculate weighted metrics."""
+
+    data_granularity_quantity: Optional[int] = None
+    """The quantity of the input data granularity. Together with data_granularity_unit field, this
+    defines the time interval between consecutive rows in the time series data. For now, only 1
+    second, 1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year are supported."""
+
+    experiment_path: Optional[str] = None
+    """The path to the created experiment. This is the path where the experiment will be stored in the
+    workspace."""
+
+    holiday_regions: Optional[List[str]] = None
+    """Region code(s) to consider when automatically adding holiday features. When empty, no holiday
+    features are added. Only supports 1 holiday region for now."""
+
+    max_runtime: Optional[int] = None
+    """The maximum duration in minutes for which the experiment is allowed to run. If the experiment
+    exceeds this time limit it will be stopped automatically."""
+
+    prediction_data_path: Optional[str] = None
+    """The three-level (fully qualified) path to a unity catalog table. This table path serves to store
+    the predictions."""
+
+    primary_metric: Optional[str] = None
+    """The evaluation metric used to optimize the forecasting model."""
+
+    register_to: Optional[str] = None
+    """The three-level (fully qualified) path to a unity catalog model. This model path serves to store
+    the best model."""
+
+    split_column: Optional[str] = None
+    """Name of the column in the input training table used for custom data splits. The values in this
+    column must be "train", "validate", or "test" to indicate which split each row belongs to."""
+
+    timeseries_identifier_columns: Optional[List[str]] = None
+    """Name of the column in the input training table used to group the dataset to predict individual
+    time series"""
+
+    training_frameworks: Optional[List[str]] = None
+    """The list of frameworks to include for model tuning. Possible values: 'Prophet', 'ARIMA',
+    'DeepAR'. An empty list will include all supported frameworks."""
+
+    def as_dict(self) -> dict:
+        """Serializes the CreateForecastingExperimentRequest into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.custom_weights_column is not None:
+            body["custom_weights_column"] = self.custom_weights_column
+        if self.data_granularity_quantity is not None:
+            body["data_granularity_quantity"] = self.data_granularity_quantity
+        if self.data_granularity_unit is not None:
+            body["data_granularity_unit"] = self.data_granularity_unit
+        if self.experiment_path is not None:
+            body["experiment_path"] = self.experiment_path
+        if self.forecast_horizon is not None:
+            body["forecast_horizon"] = self.forecast_horizon
+        if self.holiday_regions:
+            body["holiday_regions"] = [v for v in self.holiday_regions]
+        if self.max_runtime is not None:
+            body["max_runtime"] = self.max_runtime
+        if self.prediction_data_path is not None:
+            body["prediction_data_path"] = self.prediction_data_path
+        if self.primary_metric is not None:
+            body["primary_metric"] = self.primary_metric
+        if self.register_to is not None:
+            body["register_to"] = self.register_to
+        if self.split_column is not None:
+            body["split_column"] = self.split_column
+        if self.target_column is not None:
+            body["target_column"] = self.target_column
+        if self.time_column is not None:
+            body["time_column"] = self.time_column
+        if self.timeseries_identifier_columns:
+            body["timeseries_identifier_columns"] = [v for v in self.timeseries_identifier_columns]
+        if self.train_data_path is not None:
+            body["train_data_path"] = self.train_data_path
+        if self.training_frameworks:
+            body["training_frameworks"] = [v for v in self.training_frameworks]
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the CreateForecastingExperimentRequest into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.custom_weights_column is not None:
+            body["custom_weights_column"] = self.custom_weights_column
+        if self.data_granularity_quantity is not None:
+            body["data_granularity_quantity"] = self.data_granularity_quantity
+        if self.data_granularity_unit is not None:
+            body["data_granularity_unit"] = self.data_granularity_unit
+        if self.experiment_path is not None:
+            body["experiment_path"] = self.experiment_path
+        if self.forecast_horizon is not None:
+            body["forecast_horizon"] = self.forecast_horizon
+        if self.holiday_regions:
+            body["holiday_regions"] = self.holiday_regions
+        if self.max_runtime is not None:
+            body["max_runtime"] = self.max_runtime
+        if self.prediction_data_path is not None:
+            body["prediction_data_path"] = self.prediction_data_path
+        if self.primary_metric is not None:
+            body["primary_metric"] = self.primary_metric
+        if self.register_to is not None:
+            body["register_to"] = self.register_to
+        if self.split_column is not None:
+            body["split_column"] = self.split_column
+        if self.target_column is not None:
+            body["target_column"] = self.target_column
+        if self.time_column is not None:
+            body["time_column"] = self.time_column
+        if self.timeseries_identifier_columns:
+            body["timeseries_identifier_columns"] = self.timeseries_identifier_columns
+        if self.train_data_path is not None:
+            body["train_data_path"] = self.train_data_path
+        if self.training_frameworks:
+            body["training_frameworks"] = self.training_frameworks
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> CreateForecastingExperimentRequest:
+        """Deserializes the CreateForecastingExperimentRequest from a dictionary."""
+        return cls(
+            custom_weights_column=d.get("custom_weights_column", None),
+            data_granularity_quantity=d.get("data_granularity_quantity", None),
+            data_granularity_unit=d.get("data_granularity_unit", None),
+            experiment_path=d.get("experiment_path", None),
+            forecast_horizon=d.get("forecast_horizon", None),
+            holiday_regions=d.get("holiday_regions", None),
+            max_runtime=d.get("max_runtime", None),
+            prediction_data_path=d.get("prediction_data_path", None),
+            primary_metric=d.get("primary_metric", None),
+            register_to=d.get("register_to", None),
+            split_column=d.get("split_column", None),
+            target_column=d.get("target_column", None),
+            time_column=d.get("time_column", None),
+            timeseries_identifier_columns=d.get("timeseries_identifier_columns", None),
+            train_data_path=d.get("train_data_path", None),
+            training_frameworks=d.get("training_frameworks", None),
+        )
+
+
+@dataclass
+class CreateForecastingExperimentResponse:
+    experiment_id: Optional[str] = None
+    """The unique ID of the created forecasting experiment"""
+
+    def as_dict(self) -> dict:
+        """Serializes the CreateForecastingExperimentResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.experiment_id is not None:
+            body["experiment_id"] = self.experiment_id
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the CreateForecastingExperimentResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.experiment_id is not None:
+            body["experiment_id"] = self.experiment_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> CreateForecastingExperimentResponse:
+        """Deserializes the CreateForecastingExperimentResponse from a dictionary."""
+        return cls(experiment_id=d.get("experiment_id", None))
+
+
 @dataclass
 class CreateModelRequest:
     name: str
@@ -1800,6 +1995,60 @@ class FileInfo:
         return cls(file_size=d.get("file_size", None), is_dir=d.get("is_dir", None), path=d.get("path", None))


+@dataclass
+class ForecastingExperiment:
+    """Represents a forecasting experiment with its unique identifier, URL, and state."""
+
+    experiment_id: Optional[str] = None
+    """The unique ID for the forecasting experiment."""
+
+    experiment_page_url: Optional[str] = None
+    """The URL to the forecasting experiment page."""
+
+    state: Optional[ForecastingExperimentState] = None
+    """The current state of the forecasting experiment."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ForecastingExperiment into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.experiment_id is not None:
+            body["experiment_id"] = self.experiment_id
+        if self.experiment_page_url is not None:
+            body["experiment_page_url"] = self.experiment_page_url
+        if self.state is not None:
+            body["state"] = self.state.value
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the ForecastingExperiment into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.experiment_id is not None:
+            body["experiment_id"] = self.experiment_id
+        if self.experiment_page_url is not None:
+            body["experiment_page_url"] = self.experiment_page_url
+        if self.state is not None:
+            body["state"] = self.state
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> ForecastingExperiment:
+        """Deserializes the ForecastingExperiment from a dictionary."""
+        return cls(
+            experiment_id=d.get("experiment_id", None),
+            experiment_page_url=d.get("experiment_page_url", None),
+            state=_enum(d, "state", ForecastingExperimentState),
+        )
+
+
+class ForecastingExperimentState(Enum):
+
+    CANCELLED = "CANCELLED"
+    FAILED = "FAILED"
+    PENDING = "PENDING"
+    RUNNING = "RUNNING"
+    SUCCEEDED = "SUCCEEDED"
+
+
 @dataclass
 class GetExperimentByNameResponse:
     experiment: Optional[Experiment] = None
@@ -6705,6 +6954,231 @@ class ExperimentsAPI:
         return UpdateRunResponse.from_dict(res)


+class ForecastingAPI:
+    """The Forecasting API allows you to create and get serverless forecasting experiments"""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def wait_get_experiment_forecasting_succeeded(
+        self,
+        experiment_id: str,
+        timeout=timedelta(minutes=120),
+        callback: Optional[Callable[[ForecastingExperiment], None]] = None,
+    ) -> ForecastingExperiment:
+        deadline = time.time() + timeout.total_seconds()
+        target_states = (ForecastingExperimentState.SUCCEEDED,)
+        failure_states = (
+            ForecastingExperimentState.FAILED,
+            ForecastingExperimentState.CANCELLED,
+        )
+        status_message = "polling..."
+        attempt = 1
+        while time.time() < deadline:
+            poll = self.get_experiment(experiment_id=experiment_id)
+            status = poll.state
+            status_message = f"current status: {status}"
+            if status in target_states:
+                return poll
+            if callback:
+                callback(poll)
+            if status in failure_states:
+                msg = f"failed to reach SUCCEEDED, got {status}: {status_message}"
+                raise OperationFailed(msg)
+            prefix = f"experiment_id={experiment_id}"
+            sleep = attempt
+            if sleep > 10:
+                # sleep 10s max per attempt
+                sleep = 10
+            _LOG.debug(f"{prefix}: ({status}) {status_message} (sleeping ~{sleep}s)")
+            time.sleep(sleep + random.random())
+            attempt += 1
+        raise TimeoutError(f"timed out after {timeout}: {status_message}")
+
+    def create_experiment(
+        self,
+        train_data_path: str,
+        target_column: str,
+        time_column: str,
+        data_granularity_unit: str,
+        forecast_horizon: int,
+        *,
+        custom_weights_column: Optional[str] = None,
+        data_granularity_quantity: Optional[int] = None,
+        experiment_path: Optional[str] = None,
+        holiday_regions: Optional[List[str]] = None,
+        max_runtime: Optional[int] = None,
+        prediction_data_path: Optional[str] = None,
+        primary_metric: Optional[str] = None,
+        register_to: Optional[str] = None,
+        split_column: Optional[str] = None,
+        timeseries_identifier_columns: Optional[List[str]] = None,
+        training_frameworks: Optional[List[str]] = None,
+    ) -> Wait[ForecastingExperiment]:
+        """Create a forecasting experiment.
+
+        Creates a serverless forecasting experiment. Returns the experiment ID.
+
+        :param train_data_path: str
+          The three-level (fully qualified) name of a unity catalog table. This table serves as the training
+          data for the forecasting model.
+        :param target_column: str
+          Name of the column in the input training table that serves as the prediction target. The values in
+          this column will be used as the ground truth for model training.
+        :param time_column: str
+          Name of the column in the input training table that represents the timestamp of each row.
+        :param data_granularity_unit: str
+          The time unit of the input data granularity. Together with data_granularity_quantity field, this
+          defines the time interval between consecutive rows in the time series data. Possible values: * 'W'
+          (weeks) * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' / 'minutes'
+          / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / 'month' / 'months' * 'Q' / 'quarter' / 'quarters'
+          * 'Y' / 'year' / 'years'
+        :param forecast_horizon: int
+          The number of time steps into the future for which predictions should be made. This value represents
+          a multiple of data_granularity_unit and data_granularity_quantity determining how far ahead the
+          model will forecast.
+        :param custom_weights_column: str (optional)
+          Name of the column in the input training table used to customize the weight for each time series to
+          calculate weighted metrics.
+        :param data_granularity_quantity: int (optional)
+          The quantity of the input data granularity. Together with data_granularity_unit field, this defines
+          the time interval between consecutive rows in the time series data. For now, only 1 second,
+          1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year are supported.
+        :param experiment_path: str (optional)
+          The path to the created experiment. This is the path where the experiment will be stored in the
+          workspace.
+        :param holiday_regions: List[str] (optional)
+          Region code(s) to consider when automatically adding holiday features. When empty, no holiday
+          features are added. Only supports 1 holiday region for now.
+        :param max_runtime: int (optional)
+          The maximum duration in minutes for which the experiment is allowed to run. If the experiment
+          exceeds this time limit it will be stopped automatically.
+        :param prediction_data_path: str (optional)
+          The three-level (fully qualified) path to a unity catalog table. This table path serves to store the
+          predictions.
+        :param primary_metric: str (optional)
+          The evaluation metric used to optimize the forecasting model.
+        :param register_to: str (optional)
+          The three-level (fully qualified) path to a unity catalog model. This model path serves to store the
+          best model.
+        :param split_column: str (optional)
+          Name of the column in the input training table used for custom data splits. The values in this
+          column must be "train", "validate", or "test" to indicate which split each row belongs to.
+        :param timeseries_identifier_columns: List[str] (optional)
+          Name of the column in the input training table used to group the dataset to predict individual time
+          series
+        :param training_frameworks: List[str] (optional)
+          The list of frameworks to include for model tuning. Possible values: 'Prophet', 'ARIMA', 'DeepAR'.
+          An empty list will include all supported frameworks.
+
+        :returns:
+          Long-running operation waiter for :class:`ForecastingExperiment`.
+          See :method:wait_get_experiment_forecasting_succeeded for more details.
+        """
+        body = {}
+        if custom_weights_column is not None:
+            body["custom_weights_column"] = custom_weights_column
+        if data_granularity_quantity is not None:
+            body["data_granularity_quantity"] = data_granularity_quantity
+        if data_granularity_unit is not None:
+            body["data_granularity_unit"] = data_granularity_unit
+        if experiment_path is not None:
+            body["experiment_path"] = experiment_path
+        if forecast_horizon is not None:
+            body["forecast_horizon"] = forecast_horizon
+        if holiday_regions is not None:
+            body["holiday_regions"] = [v for v in holiday_regions]
+        if max_runtime is not None:
+            body["max_runtime"] = max_runtime
+        if prediction_data_path is not None:
+            body["prediction_data_path"] = prediction_data_path
+        if primary_metric is not None:
+            body["primary_metric"] = primary_metric
+        if register_to is not None:
+            body["register_to"] = register_to
+        if split_column is not None:
+            body["split_column"] = split_column
+        if target_column is not None:
+            body["target_column"] = target_column
+        if time_column is not None:
+            body["time_column"] = time_column
+        if timeseries_identifier_columns is not None:
+            body["timeseries_identifier_columns"] = [v for v in timeseries_identifier_columns]
+        if train_data_path is not None:
+            body["train_data_path"] = train_data_path
+        if training_frameworks is not None:
+            body["training_frameworks"] = [v for v in training_frameworks]
+        headers = {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+        }
+
+        op_response = self._api.do("POST", "/api/2.0/automl/create-forecasting-experiment", body=body, headers=headers)
+        return Wait(
+            self.wait_get_experiment_forecasting_succeeded,
+            response=CreateForecastingExperimentResponse.from_dict(op_response),
+            experiment_id=op_response["experiment_id"],
+        )
+
+    def create_experiment_and_wait(
+        self,
+        train_data_path: str,
+        target_column: str,
+        time_column: str,
+        data_granularity_unit: str,
+        forecast_horizon: int,
+        *,
+        custom_weights_column: Optional[str] = None,
+        data_granularity_quantity: Optional[int] = None,
+        experiment_path: Optional[str] = None,
+        holiday_regions: Optional[List[str]] = None,
+        max_runtime: Optional[int] = None,
+        prediction_data_path: Optional[str] = None,
+        primary_metric: Optional[str] = None,
+        register_to: Optional[str] = None,
+        split_column: Optional[str] = None,
+        timeseries_identifier_columns: Optional[List[str]] = None,
+        training_frameworks: Optional[List[str]] = None,
+        timeout=timedelta(minutes=120),
+    ) -> ForecastingExperiment:
+        return self.create_experiment(
+            custom_weights_column=custom_weights_column,
+            data_granularity_quantity=data_granularity_quantity,
+            data_granularity_unit=data_granularity_unit,
+            experiment_path=experiment_path,
+            forecast_horizon=forecast_horizon,
+            holiday_regions=holiday_regions,
+            max_runtime=max_runtime,
+            prediction_data_path=prediction_data_path,
+            primary_metric=primary_metric,
+            register_to=register_to,
+            split_column=split_column,
+            target_column=target_column,
+            time_column=time_column,
+            timeseries_identifier_columns=timeseries_identifier_columns,
+            train_data_path=train_data_path,
+            training_frameworks=training_frameworks,
+        ).result(timeout=timeout)
+
+    def get_experiment(self, experiment_id: str) -> ForecastingExperiment:
+        """Get a forecasting experiment.
+
+        Public RPC to get forecasting experiment
+
+        :param experiment_id: str
+          The unique ID of a forecasting experiment
+
+        :returns: :class:`ForecastingExperiment`
+        """
+
+        headers = {
+            "Accept": "application/json",
+        }
+
+        res = self._api.do("GET", f"/api/2.0/automl/get-forecasting-experiment/{experiment_id}", headers=headers)
+        return ForecastingExperiment.from_dict(res)
+
+
 class ModelRegistryAPI:
     """Note: This API reference documents APIs for the Workspace Model Registry. Databricks recommends using
     [Models in Unity Catalog](/api/workspace/registeredmodels) instead. Models in Unity Catalog provides
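The generated `ForecastingAPI` follows the SDK's standard long-running-operation pattern: `create_experiment` returns a `Wait[ForecastingExperiment]`, and `create_experiment_and_wait` blocks until a terminal state. A sketch with placeholder table and column names:

from datetime import timedelta
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# All table/column names below are placeholders.
experiment = w.forecasting.create_experiment_and_wait(
    train_data_path="main.default.sales_history",
    target_column="units_sold",
    time_column="sale_date",
    data_granularity_unit="D",
    forecast_horizon=30,
    register_to="main.default.sales_forecaster",
    timeout=timedelta(minutes=120),
)
print(experiment.state)  # ForecastingExperimentState.SUCCEEDED on success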
databricks/sdk/service/oauth2.py
CHANGED

@@ -776,13 +776,6 @@ class OidcFederationPolicy:
     endpoint. Databricks strongly recommends relying on your issuer’s well known endpoint for
     discovering public keys."""
 
-    jwks_uri: Optional[str] = None
-    """URL of the public keys used to validate the signature of federated tokens, in JWKS format. Most
-    use cases should not need to specify this field. If jwks_uri and jwks_json are both unspecified
-    (recommended), Databricks automatically fetches the public keys from your issuer’s well known
-    endpoint. Databricks strongly recommends relying on your issuer’s well known endpoint for
-    discovering public keys."""
-
     subject: Optional[str] = None
     """The required token subject, as specified in the subject claim of federated tokens. Must be
     specified for service principal federation policies. Must not be specified for account
@@ -800,8 +793,6 @@ class OidcFederationPolicy:
             body["issuer"] = self.issuer
         if self.jwks_json is not None:
             body["jwks_json"] = self.jwks_json
-        if self.jwks_uri is not None:
-            body["jwks_uri"] = self.jwks_uri
         if self.subject is not None:
             body["subject"] = self.subject
         if self.subject_claim is not None:
@@ -817,8 +808,6 @@ class OidcFederationPolicy:
             body["issuer"] = self.issuer
         if self.jwks_json is not None:
             body["jwks_json"] = self.jwks_json
-        if self.jwks_uri is not None:
-            body["jwks_uri"] = self.jwks_uri
         if self.subject is not None:
             body["subject"] = self.subject
         if self.subject_claim is not None:
@@ -832,7 +821,6 @@ class OidcFederationPolicy:
             audiences=d.get("audiences", None),
             issuer=d.get("issuer", None),
             jwks_json=d.get("jwks_json", None),
-            jwks_uri=d.get("jwks_uri", None),
             subject=d.get("subject", None),
             subject_claim=d.get("subject_claim", None),
         )
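With jwks_uri gone, jwks_json is the only remaining override for key discovery; leaving both unset (the recommended path) lets Databricks fetch keys from the issuer's well-known endpoint. A short sketch of the surviving shape, with hypothetical issuer, audience, and subject values:

from databricks.sdk.service.oauth2 import OidcFederationPolicy

# jwks_uri is no longer a field on the dataclass, and as_dict()/from_dict()
# no longer carry it, so any previous override must move to jwks_json.
policy = OidcFederationPolicy(
    issuer="https://token.actions.githubusercontent.com",
    audiences=["databricks"],                            # hypothetical audience
    subject="repo:my-org/my-repo:ref:refs/heads/main",   # hypothetical subject
)

body = policy.as_dict()
assert "jwks_uri" not in body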
databricks/sdk/service/serving.py
CHANGED

@@ -4,6 +4,7 @@ from __future__ import annotations
 
 import logging
 import random
+import threading
 import time
 from dataclasses import dataclass
 from datetime import timedelta
@@ -4657,12 +4658,31 @@ class ServingEndpointsDataPlaneAPI:
     """Serving endpoints DataPlane provides a set of operations to interact with data plane endpoints for Serving
     endpoints service."""
 
-    def __init__(self, api_client,
+    def __init__(self, api_client, control_plane_service, dpts):
         self._api = api_client
-        self.
-
-
-        self.
+        self._lock = threading.Lock()
+        self._control_plane_service = control_plane_service
+        self._dpts = dpts
+        self._data_plane_details = {}
+
+    def _data_plane_info_query(self, name: str) -> DataPlaneInfo:
+        key = "query" + "/".join(
+            [
+                str(name),
+            ]
+        )
+        with self._lock:
+            if key in self._data_plane_details:
+                return self._data_plane_details[key]
+        response = self._control_plane_service.get(
+            name=name,
+        )
+        if response.data_plane_info is None:
+            raise Exception("Resource does not support direct Data Plane access")
+        result = response.data_plane_info.query_info
+        with self._lock:
+            self._data_plane_details[key] = result
+        return result
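The new _data_plane_info_query helper memoizes the control-plane lookup per endpoint name behind a threading.Lock (hence the new import above). The lock is released during the slow get() call, so two threads that miss at the same time may both fetch; the second write simply overwrites the first, which is harmless for immutable details. A standalone sketch of the same pattern, with fetch as a hypothetical stand-in for the control-plane call:

import threading
from typing import Any, Callable, Dict


class DetailsCache:
    """Lock-guarded memoization, mirroring _data_plane_info_query."""

    def __init__(self, fetch: Callable[[str], Any]):
        self._lock = threading.Lock()
        self._details: Dict[str, Any] = {}
        self._fetch = fetch

    def get(self, name: str) -> Any:
        key = "query" + "/".join([str(name)])
        # Fast path: return a cached value while holding the lock.
        with self._lock:
            if key in self._details:
                return self._details[key]
        # Slow path runs outside the lock, so concurrent misses may both
        # fetch; the last write wins, which is fine for immutable results.
        result = self._fetch(name)
        with self._lock:
            self._details[key] = result
        return result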
 
     def query(
         self,
@@ -4757,22 +4777,10 @@ class ServingEndpointsDataPlaneAPI:
             body["stream"] = stream
         if temperature is not None:
             body["temperature"] = temperature
-
-
-        response = self._control_plane.get(
-            name=name,
-        )
-        if response.data_plane_info is None:
-            raise Exception("Resource does not support direct Data Plane access")
-        return response.data_plane_info.query_info
-
-        get_params = [
-            name,
-        ]
-        data_plane_details = self._data_plane_service.get_data_plane_details(
-            "query", get_params, info_getter, self._api.get_oauth_token
+        data_plane_info = self._data_plane_info_query(
+            name=name,
         )
-        token =
+        token = self._dpts.token(data_plane_info.endpoint_url, data_plane_info.authorization_details)
 
         def auth(r: requests.PreparedRequest) -> requests.PreparedRequest:
             authorization = f"{token.token_type} {token.access_token}"
@@ -4788,7 +4796,7 @@ class ServingEndpointsDataPlaneAPI:
         ]
         res = self._api.do(
             "POST",
-            url=
+            url=data_plane_info.endpoint_url,
             body=body,
             headers=headers,
             response_headers=response_headers,
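query() now resolves data-plane details once through the cached helper and asks the injected DataPlaneTokenSource (self._dpts) for a token scoped to the endpoint URL and authorization details, replacing the removed info_getter/get_data_plane_details round-trip. A hedged usage sketch, assuming the workspace client exposes this API as w.serving_endpoints_data_plane and that the endpoint name and payload below are hypothetical:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# The first call resolves and caches the endpoint's data-plane URL, then
# each request carries an OAuth token minted for that URL.
response = w.serving_endpoints_data_plane.query(
    name="my-endpoint",                               # hypothetical endpoint
    dataframe_records=[{"feature_a": 1.0, "feature_b": 2.0}],  # hypothetical payload
)
print(response.predictions)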
databricks/sdk/version.py
CHANGED

@@ -1 +1 @@
-__version__ = "0.45.0"
+__version__ = "0.46.0"
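A quick way to confirm which release is installed after upgrading:

from databricks.sdk.version import __version__

print(__version__)  # "0.46.0" once this release is installed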
{databricks_sdk-0.45.0.dist-info → databricks_sdk-0.46.0.dist-info}/RECORD
CHANGED

@@ -1,21 +1,21 @@
 databricks/__init__.py,sha256=CF2MJcZFwbpn9TwQER8qnCDhkPooBGQNVkX4v7g6p3g,537
-databricks/sdk/__init__.py,sha256=
+databricks/sdk/__init__.py,sha256=qCmezvkTnXqC1wVdQpE2oyM-W-QfC0dYnTeWaSLhoOI,55541
 databricks/sdk/_base_client.py,sha256=IMHtzC5BhWt-lBVjifewR1Ah5fegGDMv0__-O1hCxWI,15850
 databricks/sdk/_property.py,sha256=ccbxhkXZmZOxbx2sqKMTzhVZDuvWXG0WPHFRgac6JAM,1701
 databricks/sdk/azure.py,sha256=sN_ARpmP9h1JovtiHIsDLtrVQP_K11eNDDtHS6PD19k,1015
 databricks/sdk/casing.py,sha256=gZy-FlI7og5WNVX88Vb_7S1WeInwJLGws80CGj_9s48,1137
 databricks/sdk/clock.py,sha256=Ivlow0r_TkXcTJ8UXkxSA0czKrY0GvwHAeOvjPkJnAQ,1360
-databricks/sdk/config.py,sha256=
+databricks/sdk/config.py,sha256=xBSaO4CasFR_Bd0nNMwYicyAouvMl5BtcIRv2jC7HCM,22956
 databricks/sdk/core.py,sha256=6lsRl6BL3pLgqMMVFrOnQsx-RxxaJJL_Gt2jJfWUovs,3724
-databricks/sdk/credentials_provider.py,sha256=
-databricks/sdk/data_plane.py,sha256=
+databricks/sdk/credentials_provider.py,sha256=sFqCsVOe9-pKSTzkZELnIgeOalnM4h6EGw8TN3yKUeQ,37872
+databricks/sdk/data_plane.py,sha256=br5IPnOdE611IBubxP8xkUR9_qzbSRSYyVWSua6znWs,3109
 databricks/sdk/dbutils.py,sha256=PoDIwNAYGZhVZC7krox7tsudUDNVSk0gsFjFWlKJXVk,15753
 databricks/sdk/environments.py,sha256=9eVeb68cksqY2Lqwth2PJNmK0JEGdIjh-ebrrmUbqCc,3963
-databricks/sdk/oauth.py,sha256=
+databricks/sdk/oauth.py,sha256=rXvPGPSXmm2ZL0HkmrvkdCaSD-uM02jTbx58L1fm-J0,28186
 databricks/sdk/py.typed,sha256=pSvaHpbY1UPNEXyVFUjlgBhjPFZMmVC_UNrPC7eMOHI,74
 databricks/sdk/retries.py,sha256=7k2kEexGqGKXHNAWHbPFSZSugU8UIU0qtyly_hix22Q,2581
 databricks/sdk/useragent.py,sha256=boEgzTv-Zmo6boipZKjSopNy0CXg4GShC1_lTKpJgqs,7361
-databricks/sdk/version.py,sha256=
+databricks/sdk/version.py,sha256=VTDTPpzZ6KwzjCPZhHgalFxPlpuy4ZGRkHB_n3WGwYs,23
 databricks/sdk/_widgets/__init__.py,sha256=VhI-VvLlr3rKUT1nbROslHJIbmZX_tPJ9rRhrdFsYUA,2811
 databricks/sdk/_widgets/default_widgets_utils.py,sha256=_hwCbptLbRzWEmknco0H1wQNAYcuy2pjFO9NiRbvFeo,1127
 databricks/sdk/_widgets/ipywidgets_utils.py,sha256=mg3rEPG9z76e0yVjGgcLybUvd_zSuN5ziGeKiZ-c8Ew,2927
@@ -46,25 +46,25 @@ databricks/sdk/service/apps.py,sha256=NdPhpycOpsYlXlBB5JTvt-AmiaCxIqFKMz7370CKKQ
 databricks/sdk/service/billing.py,sha256=s-QN57TyMMfJSA40CfIjjno8iWHBssjpxcy0xcEcwyw,98590
 databricks/sdk/service/catalog.py,sha256=eu1zLzoeQxirFPMbthS5d5Mt0EjdbuqYP4sESotNwHc,602387
 databricks/sdk/service/cleanrooms.py,sha256=TON2V0-T_TOk31K9bUypuRJaAJhHMM0bQDbMjJWlCDQ,58613
-databricks/sdk/service/compute.py,sha256=
-databricks/sdk/service/dashboards.py,sha256=
-databricks/sdk/service/files.py,sha256=
+databricks/sdk/service/compute.py,sha256=R8GilWUVYVljZHfdNuiP4AJiAFWqKG4_XKn6Wg-um68,553503
+databricks/sdk/service/dashboards.py,sha256=MhKFjglM4odyOiNzagII4pprkPMh8FOfzvwm9Ntlyr8,105263
+databricks/sdk/service/files.py,sha256=gWjtORuRe7B1VZol0rr_dcSHq1gMmuKszGHaMqunslU,46234
 databricks/sdk/service/iam.py,sha256=TFKlhh7A1INGVcfAZQ1f0ORpAIzWLJ78l23vkerTe4M,174846
 databricks/sdk/service/jobs.py,sha256=2DQIqaeYq2tOeRzeeq23Q8_ntXaeJNK0LHhlcbd_8A4,443424
 databricks/sdk/service/marketplace.py,sha256=h-hGo2GHrjbVe0lXuengvqIDEN5VGhBdna4di9at5hA,175736
-databricks/sdk/service/ml.py,sha256=
-databricks/sdk/service/oauth2.py,sha256=
+databricks/sdk/service/ml.py,sha256=q5vdOguyLiTpGqT6R5w41l_cCendUx4Hm-fQeW1-08k,316482
+databricks/sdk/service/oauth2.py,sha256=LLp1uMvxfPOXzuekLjnLH6si6YOCvie7qZSpjSNDYd4,79788
 databricks/sdk/service/pipelines.py,sha256=_2RpczckyhexbCtLoB_ewVKgZNFseN8bCwABod6YUqs,166666
 databricks/sdk/service/provisioning.py,sha256=-Ly2o02i-jhNmiP9zLPeYF8H2usoB-oTG0RLF5gkIpc,169311
-databricks/sdk/service/serving.py,sha256=
+databricks/sdk/service/serving.py,sha256=eHPRoxeOpqV-dyVf7V8_6_FHBoTTwXqr5W9Y6b_iZtI,204406
 databricks/sdk/service/settings.py,sha256=9I8NHnVTtTn_u91fYQi-QpDMjKPF2c2Sec0gEub6on0,323112
 databricks/sdk/service/sharing.py,sha256=jP3XWl3qojFWmZE9xkgCixqEZPuGZAJC_M-eGXMrPQs,142473
 databricks/sdk/service/sql.py,sha256=2t8QhrEUyLekRYHnRYKkxVY8qpDbxE7JayfMSds6OKM,409593
 databricks/sdk/service/vectorsearch.py,sha256=stdRmV-iE8L0rGzhSSG5tmoVRXb9bPItSE_ss6HQVDo,79897
 databricks/sdk/service/workspace.py,sha256=T0ZbnG1qcPjKysGO_tBzl5x1PyalydeYJRBZbooYNm0,130893
-databricks_sdk-0.
-databricks_sdk-0.
-databricks_sdk-0.
-databricks_sdk-0.
-databricks_sdk-0.
-databricks_sdk-0.
+databricks_sdk-0.46.0.dist-info/LICENSE,sha256=afBgTZo-JsYqj4VOjnejBetMuHKcFR30YobDdpVFkqY,11411
+databricks_sdk-0.46.0.dist-info/METADATA,sha256=f3lrq-aclP8VS34ht65GoP5mUPPaaogzFQYVoi7Eue8,38311
+databricks_sdk-0.46.0.dist-info/NOTICE,sha256=tkRcQYA1k68wDLcnOWbg2xJDsUOJw8G8DGBhb8dnI3w,1588
+databricks_sdk-0.46.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+databricks_sdk-0.46.0.dist-info/top_level.txt,sha256=7kRdatoSgU0EUurRQJ_3F1Nv4EOSHWAr6ng25tJOJKU,11
+databricks_sdk-0.46.0.dist-info/RECORD,,
File without changes
File without changes
File without changes