databricks-sdk 0.49.0__py3-none-any.whl → 0.51.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
This version of databricks-sdk has been flagged as potentially problematic.
- databricks/sdk/__init__.py +20 -8
- databricks/sdk/config.py +2 -3
- databricks/sdk/credentials_provider.py +61 -15
- databricks/sdk/oidc_token_supplier.py +28 -0
- databricks/sdk/service/apps.py +8 -10
- databricks/sdk/service/billing.py +12 -3
- databricks/sdk/service/catalog.py +73 -4
- databricks/sdk/service/cleanrooms.py +9 -14
- databricks/sdk/service/compute.py +151 -7
- databricks/sdk/service/dashboards.py +253 -42
- databricks/sdk/service/jobs.py +602 -83
- databricks/sdk/service/ml.py +408 -72
- databricks/sdk/service/oauth2.py +8 -13
- databricks/sdk/service/pipelines.py +0 -32
- databricks/sdk/service/serving.py +26 -26
- databricks/sdk/service/settings.py +670 -113
- databricks/sdk/service/sql.py +881 -6
- databricks/sdk/service/vectorsearch.py +355 -159
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/METADATA +11 -11
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/RECORD +25 -24
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/WHEEL +1 -1
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.51.0.dist-info}/top_level.txt +0 -0
databricks/sdk/service/oauth2.py
CHANGED

@@ -1191,10 +1191,10 @@ class AccountFederationPolicyAPI:
     def __init__(self, api_client):
         self._api = api_client
 
-    def create(self,
+    def create(self, policy: FederationPolicy, *, policy_id: Optional[str] = None) -> FederationPolicy:
         """Create account federation policy.
 
-        :param policy: :class:`FederationPolicy`
+        :param policy: :class:`FederationPolicy`
         :param policy_id: str (optional)
           The identifier for the federation policy. The identifier must contain only lowercase alphanumeric
           characters, numbers, hyphens, and slashes. If unspecified, the id will be assigned by Databricks.

@@ -1284,13 +1284,13 @@ class AccountFederationPolicyAPI:
                 query["page_token"] = json["next_page_token"]
 
     def update(
-        self, policy_id: str,
+        self, policy_id: str, policy: FederationPolicy, *, update_mask: Optional[str] = None
     ) -> FederationPolicy:
         """Update account federation policy.
 
         :param policy_id: str
           The identifier for the federation policy.
-        :param policy: :class:`FederationPolicy`
+        :param policy: :class:`FederationPolicy`
         :param update_mask: str (optional)
           The field mask specifies which fields of the policy to update. To specify multiple fields in the
           field mask, use comma as the separator (no space). The special value '*' indicates that all fields

@@ -1758,13 +1758,13 @@ class ServicePrincipalFederationPolicyAPI:
         self._api = api_client
 
     def create(
-        self, service_principal_id: int,
+        self, service_principal_id: int, policy: FederationPolicy, *, policy_id: Optional[str] = None
     ) -> FederationPolicy:
         """Create service principal federation policy.
 
         :param service_principal_id: int
           The service principal id for the federation policy.
-        :param policy: :class:`FederationPolicy`
+        :param policy: :class:`FederationPolicy`
         :param policy_id: str (optional)
           The identifier for the federation policy. The identifier must contain only lowercase alphanumeric
           characters, numbers, hyphens, and slashes. If unspecified, the id will be assigned by Databricks.

@@ -1869,12 +1869,7 @@ class ServicePrincipalFederationPolicyAPI:
                 query["page_token"] = json["next_page_token"]
 
     def update(
-        self,
-        service_principal_id: int,
-        policy_id: str,
-        *,
-        policy: Optional[FederationPolicy] = None,
-        update_mask: Optional[str] = None,
+        self, service_principal_id: int, policy_id: str, policy: FederationPolicy, *, update_mask: Optional[str] = None
     ) -> FederationPolicy:
         """Update service principal federation policy.
 

@@ -1882,7 +1877,7 @@ class ServicePrincipalFederationPolicyAPI:
           The service principal id for the federation policy.
         :param policy_id: str
           The identifier for the federation policy.
-        :param policy: :class:`FederationPolicy`
+        :param policy: :class:`FederationPolicy`
         :param update_mask: str (optional)
           The field mask specifies which fields of the policy to update. To specify multiple fields in the
           field mask, use comma as the separator (no space). The special value '*' indicates that all fields
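
On both federation policy services, `policy` is now a required parameter of `create` and `update` rather than an optional keyword. Below is a minimal usage sketch against the new signatures; the `AccountClient` attribute names (`federation_policy`, `service_principal_federation_policy`) and the OIDC values are illustrative assumptions, not taken from this diff.

```python
# Sketch only: attribute names, issuer, audiences, and subject are illustrative assumptions.
from databricks.sdk import AccountClient
from databricks.sdk.service.oauth2 import FederationPolicy, OidcFederationPolicy

a = AccountClient()  # host, account_id, and credentials come from the environment/config

policy = FederationPolicy(
    oidc_policy=OidcFederationPolicy(
        issuer="https://idp.example.com",
        audiences=["databricks"],
        subject="ci-workload",
    )
)

# `policy` is now positional and required; `policy_id` stays keyword-only.
a.federation_policy.create(policy, policy_id="my-policy")

# update() also requires the policy; `update_mask` stays optional.
a.federation_policy.update("my-policy", policy, update_mask="*")

# The service principal variant now takes the policy right after the service principal id.
a.service_principal_federation_policy.create(1234567890, policy, policy_id="sp-policy")
```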

databricks/sdk/service/pipelines.py
CHANGED

@@ -3390,38 +3390,6 @@ class PipelinesAPI:
     def __init__(self, api_client):
         self._api = api_client
 
-    def wait_get_pipeline_running(
-        self,
-        pipeline_id: str,
-        timeout=timedelta(minutes=20),
-        callback: Optional[Callable[[GetPipelineResponse], None]] = None,
-    ) -> GetPipelineResponse:
-        deadline = time.time() + timeout.total_seconds()
-        target_states = (PipelineState.RUNNING,)
-        failure_states = (PipelineState.FAILED,)
-        status_message = "polling..."
-        attempt = 1
-        while time.time() < deadline:
-            poll = self.get(pipeline_id=pipeline_id)
-            status = poll.state
-            status_message = poll.cause
-            if status in target_states:
-                return poll
-            if callback:
-                callback(poll)
-            if status in failure_states:
-                msg = f"failed to reach RUNNING, got {status}: {status_message}"
-                raise OperationFailed(msg)
-            prefix = f"pipeline_id={pipeline_id}"
-            sleep = attempt
-            if sleep > 10:
-                # sleep 10s max per attempt
-                sleep = 10
-            _LOG.debug(f"{prefix}: ({status}) {status_message} (sleeping ~{sleep}s)")
-            time.sleep(sleep + random.random())
-            attempt += 1
-        raise TimeoutError(f"timed out after {timeout}: {status_message}")
-
     def wait_get_pipeline_idle(
         self,
         pipeline_id: str,
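
With `wait_get_pipeline_running` removed, code that waited for a pipeline to reach RUNNING needs its own loop (or should switch to the remaining `wait_get_pipeline_idle`). A minimal sketch of an equivalent wait, built from the same calls the deleted helper used; it assumes a configured `WorkspaceClient` and is not an SDK-provided API.

```python
# Hand-rolled replacement for the removed waiter; mirrors the deleted logic above.
import time
from datetime import timedelta

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.pipelines import GetPipelineResponse, PipelineState


def wait_get_pipeline_running(
    w: WorkspaceClient, pipeline_id: str, timeout: timedelta = timedelta(minutes=20)
) -> GetPipelineResponse:
    deadline = time.time() + timeout.total_seconds()
    while time.time() < deadline:
        poll = w.pipelines.get(pipeline_id=pipeline_id)
        if poll.state == PipelineState.RUNNING:
            return poll
        if poll.state == PipelineState.FAILED:
            raise RuntimeError(f"failed to reach RUNNING, got {poll.state}: {poll.cause}")
        time.sleep(10)  # the removed helper backed off up to ~10s per attempt
    raise TimeoutError(f"timed out after {timeout} waiting for pipeline {pipeline_id}")
```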

databricks/sdk/service/serving.py
CHANGED

@@ -849,8 +849,9 @@ class CreateServingEndpoint:
     workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores."""
 
     ai_gateway: Optional[AiGatewayConfig] = None
-    """The AI Gateway configuration for the serving endpoint. NOTE: Only external model and provisioned
-    throughput endpoints are currently supported."""
+    """The AI Gateway configuration for the serving endpoint. NOTE: External model, provisioned
+    throughput, and pay-per-token endpoints are fully supported; agent endpoints currently only
+    support inference tables."""
 
     budget_policy_id: Optional[str] = None
     """The budget policy to be applied to the serving endpoint."""

@@ -2873,7 +2874,8 @@ class ServedEntityInput:
     """The workload size of the served entity. The workload size corresponds to a range of provisioned
     concurrency that the compute autoscales between. A single unit of provisioned concurrency can
     process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency),
-    "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
+    "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
+    Additional custom workload sizes can also be used when available in the workspace. If
     scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size
     is 0."""

@@ -3013,7 +3015,8 @@ class ServedEntityOutput:
     """The workload size of the served entity. The workload size corresponds to a range of provisioned
     concurrency that the compute autoscales between. A single unit of provisioned concurrency can
     process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency),
-    "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
+    "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
+    Additional custom workload sizes can also be used when available in the workspace. If
     scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size
     is 0."""

@@ -3203,11 +3206,12 @@ class ServedModelInput:
     model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if
     not specified for other entities, it defaults to entity_name-entity_version."""
 
-    workload_size: Optional[ServedModelInputWorkloadSize] = None
+    workload_size: Optional[str] = None
     """The workload size of the served entity. The workload size corresponds to a range of provisioned
     concurrency that the compute autoscales between. A single unit of provisioned concurrency can
     process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency),
-    "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
+    "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
+    Additional custom workload sizes can also be used when available in the workspace. If
     scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size
     is 0."""

@@ -3239,7 +3243,7 @@ class ServedModelInput:
         if self.scale_to_zero_enabled is not None:
             body["scale_to_zero_enabled"] = self.scale_to_zero_enabled
         if self.workload_size is not None:
-            body["workload_size"] = self.workload_size.value
+            body["workload_size"] = self.workload_size
         if self.workload_type is not None:
             body["workload_type"] = self.workload_type.value
         return body

@@ -3281,18 +3285,11 @@ class ServedModelInput:
             model_version=d.get("model_version", None),
             name=d.get("name", None),
             scale_to_zero_enabled=d.get("scale_to_zero_enabled", None),
-            workload_size=_enum(d, "workload_size", ServedModelInputWorkloadSize),
+            workload_size=d.get("workload_size", None),
             workload_type=_enum(d, "workload_type", ServedModelInputWorkloadType),
         )
 
 
-class ServedModelInputWorkloadSize(Enum):
-
-    LARGE = "Large"
-    MEDIUM = "Medium"
-    SMALL = "Small"
-
-
 class ServedModelInputWorkloadType(Enum):
     """Please keep this in sync with with workload types in InferenceEndpointEntities.scala"""
 
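
Since `ServedModelInputWorkloadSize` is gone and the field is typed as a plain string, callers pass the size name directly and `as_dict()` no longer unwraps an enum. A minimal sketch under that assumption; the model name and version are placeholders.

```python
# Sketch only: model name and version are placeholders.
from databricks.sdk.service.serving import ServedModelInput

served_model = ServedModelInput(
    model_name="main.default.my_model",
    model_version="1",
    scale_to_zero_enabled=True,
    workload_size="Small",  # previously ServedModelInputWorkloadSize.SMALL
)

# Serialization now emits the raw string instead of an Enum .value.
assert served_model.as_dict()["workload_size"] == "Small"
```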

@@ -3337,7 +3334,8 @@ class ServedModelOutput:
     """The workload size of the served entity. The workload size corresponds to a range of provisioned
     concurrency that the compute autoscales between. A single unit of provisioned concurrency can
     process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency),
-    "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
+    "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency).
+    Additional custom workload sizes can also be used when available in the workspace. If
     scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size
     is 0."""

@@ -3531,8 +3529,9 @@ class ServerLogsResponse:
 @dataclass
 class ServingEndpoint:
     ai_gateway: Optional[AiGatewayConfig] = None
-    """The AI Gateway configuration for the serving endpoint. NOTE: Only external model and provisioned
-    throughput endpoints are currently supported."""
+    """The AI Gateway configuration for the serving endpoint. NOTE: External model, provisioned
+    throughput, and pay-per-token endpoints are fully supported; agent endpoints currently only
+    support inference tables."""
 
     budget_policy_id: Optional[str] = None
     """The budget policy associated with the endpoint."""

@@ -3749,8 +3748,9 @@ class ServingEndpointAccessControlResponse:
 @dataclass
 class ServingEndpointDetailed:
     ai_gateway: Optional[AiGatewayConfig] = None
-    """The AI Gateway configuration for the serving endpoint. NOTE: Only external model and provisioned
-    throughput endpoints are currently supported."""
+    """The AI Gateway configuration for the serving endpoint. NOTE: External model, provisioned
+    throughput, and pay-per-token endpoints are fully supported; agent endpoints currently only
+    support inference tables."""
 
     budget_policy_id: Optional[str] = None
     """The budget policy associated with the endpoint."""

@@ -4242,8 +4242,9 @@ class ServingEndpointsAPI:
           The name of the serving endpoint. This field is required and must be unique across a Databricks
           workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores.
         :param ai_gateway: :class:`AiGatewayConfig` (optional)
-          The AI Gateway configuration for the serving endpoint. NOTE: Only external model and provisioned
-          throughput endpoints are currently supported.
+          The AI Gateway configuration for the serving endpoint. NOTE: External model, provisioned throughput,
+          and pay-per-token endpoints are fully supported; agent endpoints currently only support inference
+          tables.
         :param budget_policy_id: str (optional)
           The budget policy to be applied to the serving endpoint.
         :param config: :class:`EndpointCoreConfigInput` (optional)
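
The revised note on `ai_gateway` spells out which endpoint types the gateway fully supports. A minimal sketch of passing an AI Gateway configuration when creating an endpoint, assuming a configured `WorkspaceClient`; the endpoint and entity names are placeholders, and usage tracking is just one example of a gateway feature.

```python
# Sketch only: endpoint name, entity name, and version are placeholders.
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.serving import (
    AiGatewayConfig,
    AiGatewayUsageTrackingConfig,
    EndpointCoreConfigInput,
    ServedEntityInput,
)

w = WorkspaceClient()

w.serving_endpoints.create(
    name="my-endpoint",
    config=EndpointCoreConfigInput(
        served_entities=[
            ServedEntityInput(
                entity_name="main.default.my_model",
                entity_version="1",
                workload_size="Small",
                scale_to_zero_enabled=True,
            )
        ]
    ),
    ai_gateway=AiGatewayConfig(
        usage_tracking_config=AiGatewayUsageTrackingConfig(enabled=True),
    ),
)
```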

@@ -4533,8 +4534,7 @@ class ServingEndpointsAPI:
     def put(self, name: str, *, rate_limits: Optional[List[RateLimit]] = None) -> PutResponse:
         """Update rate limits of a serving endpoint.
 
-        Used to update the rate limits of a serving endpoint. NOTE: Only foundation model endpoints are
-        currently supported. For external models, use AI Gateway to manage rate limits.
+        Deprecated: Please use AI Gateway to manage rate limits instead.
 
         :param name: str
           The name of the serving endpoint whose rate limits are being updated. This field is required.

@@ -4566,8 +4566,8 @@ class ServingEndpointsAPI:
     ) -> PutAiGatewayResponse:
         """Update AI Gateway of a serving endpoint.
 
-        Used to update the AI Gateway of a serving endpoint. NOTE: Only external model and provisioned
-        throughput endpoints are currently supported.
+        Used to update the AI Gateway of a serving endpoint. NOTE: External model, provisioned throughput, and
+        pay-per-token endpoints are fully supported; agent endpoints currently only support inference tables.
 
         :param name: str
           The name of the serving endpoint whose AI Gateway is being updated. This field is required.
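
Taken together with the deprecation of `put`, per-endpoint rate limits would now be managed through `put_ai_gateway`. A minimal sketch under the assumption that `put_ai_gateway` accepts a `rate_limits` parameter and that the `AiGatewayRateLimit` classes keep the names used elsewhere in `serving.py`; the endpoint name and limit values are placeholders.

```python
# Sketch only: endpoint name and limit values are placeholders; parameter and
# enum member names are assumptions based on the rest of serving.py.
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.serving import (
    AiGatewayRateLimit,
    AiGatewayRateLimitKey,
    AiGatewayRateLimitRenewalPeriod,
)

w = WorkspaceClient()

w.serving_endpoints.put_ai_gateway(
    name="my-endpoint",
    rate_limits=[
        AiGatewayRateLimit(
            calls=100,
            key=AiGatewayRateLimitKey.ENDPOINT,
            renewal_period=AiGatewayRateLimitRenewalPeriod.MINUTE,
        )
    ],
)
```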