databricks-sdk 0.47.0__py3-none-any.whl → 0.48.0__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- databricks/sdk/service/catalog.py +0 -2
- databricks/sdk/service/compute.py +181 -376
- databricks/sdk/service/dashboards.py +0 -2
- databricks/sdk/service/iam.py +29 -12
- databricks/sdk/service/jobs.py +0 -1
- databricks/sdk/service/marketplace.py +0 -2
- databricks/sdk/service/ml.py +45 -20
- databricks/sdk/service/oauth2.py +0 -12
- databricks/sdk/service/pipelines.py +28 -25
- databricks/sdk/service/serving.py +0 -193
- databricks/sdk/service/sharing.py +71 -71
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/RECORD +18 -18
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/WHEEL +1 -1
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/top_level.txt +0 -0
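After upgrading, the installed version can be confirmed from the `version.py` module listed above (a minimal sketch; it assumes the standard `__version__` attribute in `databricks/sdk/version.py`):

```python
# pip install --upgrade "databricks-sdk==0.48.0"
from databricks.sdk.version import __version__  # the one-line change in this release bumps this string

print(__version__)  # expected: "0.48.0"
```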
databricks/sdk/service/dashboards.py
CHANGED

@@ -1082,7 +1082,6 @@ class MessageErrorType(Enum):
     FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION = "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION"
     FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION = "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION"
     FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION = "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION"
-    GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION = "GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION"
     GENERIC_CHAT_COMPLETION_EXCEPTION = "GENERIC_CHAT_COMPLETION_EXCEPTION"
     GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION = "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION"
     GENERIC_SQL_EXEC_API_CALL_EXCEPTION = "GENERIC_SQL_EXEC_API_CALL_EXCEPTION"

@@ -1097,7 +1096,6 @@ class MessageErrorType(Enum):
     MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION = "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION"
     MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION = "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION"
     MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION = "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION"
-    MISSING_SQL_QUERY_EXCEPTION = "MISSING_SQL_QUERY_EXCEPTION"
     NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE = "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE"
     NO_QUERY_TO_VISUALIZE_EXCEPTION = "NO_QUERY_TO_VISUALIZE_EXCEPTION"
     NO_TABLES_TO_QUERY_EXCEPTION = "NO_TABLES_TO_QUERY_EXCEPTION"
databricks/sdk/service/iam.py
CHANGED
@@ -846,7 +846,7 @@ class ObjectPermissions:
 @dataclass
 class PartialUpdate:
     id: Optional[str] = None
-    """Unique ID in the Databricks workspace."""
+    """Unique ID for a user in the Databricks workspace."""

     operations: Optional[List[Patch]] = None

@@ -1918,7 +1918,8 @@ class User:
     groups: Optional[List[ComplexValue]] = None

     id: Optional[str] = None
-    """Databricks user ID.
+    """Databricks user ID. This is automatically set by Databricks. Any value provided by the client
+    will be ignored."""

     name: Optional[Name] = None

@@ -2479,7 +2480,7 @@ class AccountGroupsAPI:
         Partially updates the details of a group.

         :param id: str
-          Unique ID in the Databricks
+          Unique ID for a group in the Databricks account.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].

@@ -2492,6 +2493,7 @@ class AccountGroupsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -2555,6 +2557,7 @@ class AccountGroupsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -2762,7 +2765,7 @@ class AccountServicePrincipalsAPI:
         Partially updates the details of a single service principal in the Databricks account.

         :param id: str
-          Unique ID in the Databricks
+          Unique ID for a service principal in the Databricks account.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].

@@ -2775,6 +2778,7 @@ class AccountServicePrincipalsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -2844,6 +2848,7 @@ class AccountServicePrincipalsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -2907,7 +2912,8 @@ class AccountUsersAPI:
           External ID is not currently supported. It is reserved for future use.
         :param groups: List[:class:`ComplexValue`] (optional)
         :param id: str (optional)
-          Databricks user ID.
+          Databricks user ID. This is automatically set by Databricks. Any value provided by the client will
+          be ignored.
         :param name: :class:`Name` (optional)
         :param roles: List[:class:`ComplexValue`] (optional)
           Corresponds to AWS instance profile/arn role.

@@ -3117,7 +3123,7 @@ class AccountUsersAPI:
         Partially updates a user resource by applying the supplied operations on specific user attributes.

         :param id: str
-          Unique ID in the Databricks
+          Unique ID for a user in the Databricks account.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].

@@ -3130,6 +3136,7 @@ class AccountUsersAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -3157,7 +3164,8 @@ class AccountUsersAPI:
         Replaces a user's information with the data supplied in request.

         :param id: str
-          Databricks user ID.
+          Databricks user ID. This is automatically set by Databricks. Any value provided by the client will
+          be ignored.
         :param active: bool (optional)
           If this user is active
         :param display_name: str (optional)

@@ -3207,6 +3215,7 @@ class AccountUsersAPI:
         if user_name is not None:
             body["userName"] = user_name
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -3425,7 +3434,7 @@ class GroupsAPI:
         Partially updates the details of a group.

         :param id: str
-          Unique ID in the Databricks workspace.
+          Unique ID for a group in the Databricks workspace.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].

@@ -3438,6 +3447,7 @@ class GroupsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -3499,6 +3509,7 @@ class GroupsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -3911,7 +3922,7 @@ class ServicePrincipalsAPI:
         Partially updates the details of a single service principal in the Databricks workspace.

         :param id: str
-          Unique ID in the Databricks workspace.
+          Unique ID for a service principal in the Databricks workspace.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].

@@ -3924,6 +3935,7 @@ class ServicePrincipalsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -3988,6 +4000,7 @@ class ServicePrincipalsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -4046,7 +4059,8 @@ class UsersAPI:
           External ID is not currently supported. It is reserved for future use.
         :param groups: List[:class:`ComplexValue`] (optional)
         :param id: str (optional)
-          Databricks user ID.
+          Databricks user ID. This is automatically set by Databricks. Any value provided by the client will
+          be ignored.
         :param name: :class:`Name` (optional)
         :param roles: List[:class:`ComplexValue`] (optional)
           Corresponds to AWS instance profile/arn role.

@@ -4280,7 +4294,7 @@ class UsersAPI:
         Partially updates a user resource by applying the supplied operations on specific user attributes.

         :param id: str
-          Unique ID in the Databricks workspace.
+          Unique ID for a user in the Databricks workspace.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].

@@ -4293,6 +4307,7 @@ class UsersAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

@@ -4341,7 +4356,8 @@ class UsersAPI:
         Replaces a user's information with the data supplied in request.

         :param id: str
-          Databricks user ID.
+          Databricks user ID. This is automatically set by Databricks. Any value provided by the client will
+          be ignored.
         :param active: bool (optional)
           If this user is active
         :param display_name: str (optional)

@@ -4391,6 +4407,7 @@ class UsersAPI:
         if user_name is not None:
             body["userName"] = user_name
         headers = {
+            "Accept": "application/json",
             "Content-Type": "application/json",
         }

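The SCIM patch and update calls above now also send an `Accept: application/json` request header, and the `id` docstrings clarify that IDs are server-assigned. A minimal sketch of one affected call; the workspace credentials and the user ID "12345" are placeholders:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service import iam

w = WorkspaceClient()  # reads host/token from the environment or ~/.databrickscfg

# Deactivate a user via SCIM PATCH; "12345" stands in for the server-assigned
# Databricks user ID (client-supplied IDs are ignored on create).
w.users.patch(
    id="12345",
    operations=[iam.Patch(op=iam.PatchOp.REPLACE, path="active", value=False)],
    schemas=[iam.PatchSchema.URN_IETF_PARAMS_SCIM_API_MESSAGES_2_0_PATCH_OP],
)
```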
databricks/sdk/service/jobs.py
CHANGED
@@ -3659,7 +3659,6 @@ class PerformanceTarget(Enum):
     on serverless compute should be. The performance mode on the job or pipeline should map to a
     performance setting that is passed to Cluster Manager (see cluster-common PerformanceTarget)."""

-    BALANCED = "BALANCED"
     COST_OPTIMIZED = "COST_OPTIMIZED"
     PERFORMANCE_OPTIMIZED = "PERFORMANCE_OPTIMIZED"

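Since `PerformanceTarget.BALANCED` is removed in 0.48.0, code that referenced it should switch to one of the remaining members, e.g.:

```python
from databricks.sdk.service.jobs import PerformanceTarget

# BALANCED no longer exists; pick one of the members that remain in 0.48.0.
target = PerformanceTarget.PERFORMANCE_OPTIMIZED  # or PerformanceTarget.COST_OPTIMIZED
print(target.value)
```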
databricks/sdk/service/marketplace.py
CHANGED

@@ -1192,7 +1192,6 @@ class FileParent:
 class FileParentType(Enum):

     LISTING = "LISTING"
-    LISTING_RESOURCE = "LISTING_RESOURCE"
     PROVIDER = "PROVIDER"


@@ -2453,7 +2452,6 @@ class ListingType(Enum):

 class MarketplaceFileType(Enum):

-    APP = "APP"
     EMBEDDED_NOTEBOOK = "EMBEDDED_NOTEBOOK"
     PROVIDER_ICON = "PROVIDER_ICON"

databricks/sdk/service/ml.py
CHANGED
@@ -499,19 +499,27 @@ class CreateForecastingExperimentRequest:
     time_column: str
     """Name of the column in the input training table that represents the timestamp of each row."""

-
-    """The
-    time
-
+    data_granularity_unit: str
+    """The time unit of the input data granularity. Together with data_granularity_quantity field, this
+    defines the time interval between consecutive rows in the time series data. Possible values: *
+    'W' (weeks) * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' /
+    'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / 'month' / 'months' * 'Q' /
+    'quarter' / 'quarters' * 'Y' / 'year' / 'years'"""

     forecast_horizon: int
     """The number of time steps into the future for which predictions should be made. This value
-    represents a multiple of
+    represents a multiple of data_granularity_unit and data_granularity_quantity determining how far
+    ahead the model will forecast."""

     custom_weights_column: Optional[str] = None
     """Name of the column in the input training table used to customize the weight for each time series
     to calculate weighted metrics."""

+    data_granularity_quantity: Optional[int] = None
+    """The quantity of the input data granularity. Together with data_granularity_unit field, this
+    defines the time interval between consecutive rows in the time series data. For now, only 1
+    second, 1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year are supported."""
+
     experiment_path: Optional[str] = None
     """The path to the created experiment. This is the path where the experiment will be stored in the
     workspace."""

@@ -552,10 +560,12 @@ class CreateForecastingExperimentRequest:
         body = {}
         if self.custom_weights_column is not None:
             body["custom_weights_column"] = self.custom_weights_column
+        if self.data_granularity_quantity is not None:
+            body["data_granularity_quantity"] = self.data_granularity_quantity
+        if self.data_granularity_unit is not None:
+            body["data_granularity_unit"] = self.data_granularity_unit
         if self.experiment_path is not None:
             body["experiment_path"] = self.experiment_path
-        if self.forecast_granularity is not None:
-            body["forecast_granularity"] = self.forecast_granularity
         if self.forecast_horizon is not None:
             body["forecast_horizon"] = self.forecast_horizon
         if self.holiday_regions:

@@ -587,10 +597,12 @@ class CreateForecastingExperimentRequest:
         body = {}
         if self.custom_weights_column is not None:
             body["custom_weights_column"] = self.custom_weights_column
+        if self.data_granularity_quantity is not None:
+            body["data_granularity_quantity"] = self.data_granularity_quantity
+        if self.data_granularity_unit is not None:
+            body["data_granularity_unit"] = self.data_granularity_unit
         if self.experiment_path is not None:
             body["experiment_path"] = self.experiment_path
-        if self.forecast_granularity is not None:
-            body["forecast_granularity"] = self.forecast_granularity
         if self.forecast_horizon is not None:
             body["forecast_horizon"] = self.forecast_horizon
         if self.holiday_regions:

@@ -622,8 +634,9 @@ class CreateForecastingExperimentRequest:
         """Deserializes the CreateForecastingExperimentRequest from a dictionary."""
         return cls(
             custom_weights_column=d.get("custom_weights_column", None),
+            data_granularity_quantity=d.get("data_granularity_quantity", None),
+            data_granularity_unit=d.get("data_granularity_unit", None),
             experiment_path=d.get("experiment_path", None),
-            forecast_granularity=d.get("forecast_granularity", None),
             forecast_horizon=d.get("forecast_horizon", None),
             holiday_regions=d.get("holiday_regions", None),
             max_runtime=d.get("max_runtime", None),

@@ -6987,10 +7000,11 @@ class ForecastingAPI:
         train_data_path: str,
         target_column: str,
         time_column: str,
-
+        data_granularity_unit: str,
         forecast_horizon: int,
         *,
         custom_weights_column: Optional[str] = None,
+        data_granularity_quantity: Optional[int] = None,
         experiment_path: Optional[str] = None,
         holiday_regions: Optional[List[str]] = None,
         max_runtime: Optional[int] = None,

@@ -7013,16 +7027,23 @@ class ForecastingAPI:
           this column will be used as the ground truth for model training.
         :param time_column: str
           Name of the column in the input training table that represents the timestamp of each row.
-        :param
-          The
-
-
+        :param data_granularity_unit: str
+          The time unit of the input data granularity. Together with data_granularity_quantity field, this
+          defines the time interval between consecutive rows in the time series data. Possible values: * 'W'
+          (weeks) * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' / 'minutes'
+          / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / 'month' / 'months' * 'Q' / 'quarter' / 'quarters'
+          * 'Y' / 'year' / 'years'
         :param forecast_horizon: int
           The number of time steps into the future for which predictions should be made. This value represents
-          a multiple of
+          a multiple of data_granularity_unit and data_granularity_quantity determining how far ahead the
+          model will forecast.
         :param custom_weights_column: str (optional)
           Name of the column in the input training table used to customize the weight for each time series to
          calculate weighted metrics.
+        :param data_granularity_quantity: int (optional)
+          The quantity of the input data granularity. Together with data_granularity_unit field, this defines
+          the time interval between consecutive rows in the time series data. For now, only 1 second,
+          1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year are supported.
         :param experiment_path: str (optional)
           The path to the created experiment. This is the path where the experiment will be stored in the
           workspace.

@@ -7057,10 +7078,12 @@ class ForecastingAPI:
         body = {}
         if custom_weights_column is not None:
             body["custom_weights_column"] = custom_weights_column
+        if data_granularity_quantity is not None:
+            body["data_granularity_quantity"] = data_granularity_quantity
+        if data_granularity_unit is not None:
+            body["data_granularity_unit"] = data_granularity_unit
         if experiment_path is not None:
             body["experiment_path"] = experiment_path
-        if forecast_granularity is not None:
-            body["forecast_granularity"] = forecast_granularity
         if forecast_horizon is not None:
             body["forecast_horizon"] = forecast_horizon
         if holiday_regions is not None:

@@ -7102,10 +7125,11 @@ class ForecastingAPI:
         train_data_path: str,
         target_column: str,
         time_column: str,
-
+        data_granularity_unit: str,
        forecast_horizon: int,
         *,
         custom_weights_column: Optional[str] = None,
+        data_granularity_quantity: Optional[int] = None,
         experiment_path: Optional[str] = None,
         holiday_regions: Optional[List[str]] = None,
         max_runtime: Optional[int] = None,

@@ -7119,8 +7143,9 @@ class ForecastingAPI:
     ) -> ForecastingExperiment:
         return self.create_experiment(
             custom_weights_column=custom_weights_column,
+            data_granularity_quantity=data_granularity_quantity,
+            data_granularity_unit=data_granularity_unit,
             experiment_path=experiment_path,
-            forecast_granularity=forecast_granularity,
             forecast_horizon=forecast_horizon,
             holiday_regions=holiday_regions,
             max_runtime=max_runtime,
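The forecasting API now takes a required `data_granularity_unit` and an optional `data_granularity_quantity` in place of the old `forecast_granularity`. A sketch of the new call shape, assuming the workspace client exposes the API as `w.forecasting` and using placeholder table, column and path names:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

operation = w.forecasting.create_experiment(
    train_data_path="main.demo.sales_history",   # placeholder Unity Catalog table
    target_column="units_sold",
    time_column="order_date",
    data_granularity_unit="D",                   # daily rows; 'W', 'hours', 'm', 'S', 'M', 'Q', 'Y', ... also accepted
    data_granularity_quantity=1,                 # optional; only the quantities listed in the docstring are supported
    forecast_horizon=30,                         # forecast 30 steps (here: days) ahead
    experiment_path="/Shared/forecasting/sales_demo",
)
# create_experiment starts a long-running operation; how you wait on it is unchanged by this release.
```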
databricks/sdk/service/oauth2.py
CHANGED
@@ -776,13 +776,6 @@ class OidcFederationPolicy:
     endpoint. Databricks strongly recommends relying on your issuer’s well known endpoint for
     discovering public keys."""

-    jwks_uri: Optional[str] = None
-    """URL of the public keys used to validate the signature of federated tokens, in JWKS format. Most
-    use cases should not need to specify this field. If jwks_uri and jwks_json are both unspecified
-    (recommended), Databricks automatically fetches the public keys from your issuer’s well known
-    endpoint. Databricks strongly recommends relying on your issuer’s well known endpoint for
-    discovering public keys."""
-
     subject: Optional[str] = None
     """The required token subject, as specified in the subject claim of federated tokens. Must be
     specified for service principal federation policies. Must not be specified for account

@@ -800,8 +793,6 @@ class OidcFederationPolicy:
             body["issuer"] = self.issuer
         if self.jwks_json is not None:
             body["jwks_json"] = self.jwks_json
-        if self.jwks_uri is not None:
-            body["jwks_uri"] = self.jwks_uri
         if self.subject is not None:
             body["subject"] = self.subject
         if self.subject_claim is not None:

@@ -817,8 +808,6 @@ class OidcFederationPolicy:
             body["issuer"] = self.issuer
         if self.jwks_json is not None:
             body["jwks_json"] = self.jwks_json
-        if self.jwks_uri is not None:
-            body["jwks_uri"] = self.jwks_uri
         if self.subject is not None:
             body["subject"] = self.subject
         if self.subject_claim is not None:

@@ -832,7 +821,6 @@ class OidcFederationPolicy:
             audiences=d.get("audiences", None),
             issuer=d.get("issuer", None),
             jwks_json=d.get("jwks_json", None),
-            jwks_uri=d.get("jwks_uri", None),
             subject=d.get("subject", None),
             subject_claim=d.get("subject_claim", None),
         )

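With `jwks_uri` removed from `OidcFederationPolicy`, policies should either pass `jwks_json` or, as recommended, let Databricks discover keys from the issuer's well-known endpoint. A sketch with placeholder issuer, audience and subject values, assuming the surrounding `FederationPolicy` wrapper from the same module:

```python
from databricks.sdk.service.oauth2 import FederationPolicy, OidcFederationPolicy

policy = FederationPolicy(
    oidc_policy=OidcFederationPolicy(
        issuer="https://token.actions.githubusercontent.com",
        audiences=["https://github.com/my-org"],
        subject="repo:my-org/my-repo:ref:refs/heads/main",
        # no jwks_uri/jwks_json: public keys are fetched from the issuer's well-known endpoint
    )
)
```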
databricks/sdk/service/pipelines.py
CHANGED

@@ -69,7 +69,7 @@ class CreatePipeline:

     ingestion_definition: Optional[IngestionPipelineDefinition] = None
     """The configuration for a managed ingestion pipeline. These settings cannot be used with the
-    'libraries', '
+    'libraries', 'target' or 'catalog' settings."""

     libraries: Optional[List[PipelineLibrary]] = None
     """Libraries or code needed by this deployment."""

@@ -95,7 +95,8 @@ class CreatePipeline:
     is thrown."""

     schema: Optional[str] = None
-    """The default schema (database) where tables are read from or published to.
+    """The default schema (database) where tables are read from or published to. The presence of this
+    field implies that the pipeline is in direct publishing mode."""

     serverless: Optional[bool] = None
     """Whether serverless compute is enabled for this pipeline."""

@@ -104,9 +105,9 @@ class CreatePipeline:
     """DBFS root directory for storing checkpoints and tables."""

     target: Optional[str] = None
-    """Target schema (database) to add tables in this pipeline to.
-
-
+    """Target schema (database) to add tables in this pipeline to. If not specified, no data is
+    published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify
+    `catalog`."""

     trigger: Optional[PipelineTrigger] = None
     """Which pipeline trigger to use. Deprecated: Use `continuous` instead."""

@@ -442,7 +443,7 @@ class EditPipeline:

     ingestion_definition: Optional[IngestionPipelineDefinition] = None
     """The configuration for a managed ingestion pipeline. These settings cannot be used with the
-    'libraries', '
+    'libraries', 'target' or 'catalog' settings."""

     libraries: Optional[List[PipelineLibrary]] = None
     """Libraries or code needed by this deployment."""

@@ -471,7 +472,8 @@ class EditPipeline:
     is thrown."""

     schema: Optional[str] = None
-    """The default schema (database) where tables are read from or published to.
+    """The default schema (database) where tables are read from or published to. The presence of this
+    field implies that the pipeline is in direct publishing mode."""

     serverless: Optional[bool] = None
     """Whether serverless compute is enabled for this pipeline."""

@@ -480,9 +482,9 @@ class EditPipeline:
     """DBFS root directory for storing checkpoints and tables."""

     target: Optional[str] = None
-    """Target schema (database) to add tables in this pipeline to.
-
-
+    """Target schema (database) to add tables in this pipeline to. If not specified, no data is
+    published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify
+    `catalog`."""

     trigger: Optional[PipelineTrigger] = None
     """Which pipeline trigger to use. Deprecated: Use `continuous` instead."""

@@ -2216,7 +2218,7 @@ class PipelineSpec:

     ingestion_definition: Optional[IngestionPipelineDefinition] = None
     """The configuration for a managed ingestion pipeline. These settings cannot be used with the
-    'libraries', '
+    'libraries', 'target' or 'catalog' settings."""

     libraries: Optional[List[PipelineLibrary]] = None
     """Libraries or code needed by this deployment."""

@@ -2234,7 +2236,8 @@ class PipelineSpec:
     """Restart window of this pipeline."""

     schema: Optional[str] = None
-    """The default schema (database) where tables are read from or published to.
+    """The default schema (database) where tables are read from or published to. The presence of this
+    field implies that the pipeline is in direct publishing mode."""

     serverless: Optional[bool] = None
     """Whether serverless compute is enabled for this pipeline."""

@@ -2243,9 +2246,9 @@ class PipelineSpec:
     """DBFS root directory for storing checkpoints and tables."""

     target: Optional[str] = None
-    """Target schema (database) to add tables in this pipeline to.
-
-
+    """Target schema (database) to add tables in this pipeline to. If not specified, no data is
+    published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify
+    `catalog`."""

     trigger: Optional[PipelineTrigger] = None
     """Which pipeline trigger to use. Deprecated: Use `continuous` instead."""

@@ -3455,7 +3458,7 @@ class PipelinesAPI:
           Unique identifier for this pipeline.
         :param ingestion_definition: :class:`IngestionPipelineDefinition` (optional)
           The configuration for a managed ingestion pipeline. These settings cannot be used with the
-          'libraries', '
+          'libraries', 'target' or 'catalog' settings.
         :param libraries: List[:class:`PipelineLibrary`] (optional)
           Libraries or code needed by this deployment.
         :param name: str (optional)

@@ -3473,15 +3476,15 @@ class PipelinesAPI:
           Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is
           thrown.
         :param schema: str (optional)
-          The default schema (database) where tables are read from or published to.
+          The default schema (database) where tables are read from or published to. The presence of this field
+          implies that the pipeline is in direct publishing mode.
         :param serverless: bool (optional)
           Whether serverless compute is enabled for this pipeline.
         :param storage: str (optional)
           DBFS root directory for storing checkpoints and tables.
         :param target: str (optional)
-          Target schema (database) to add tables in this pipeline to.
-
-          for pipeline creation in favor of the `schema` field.
+          Target schema (database) to add tables in this pipeline to. If not specified, no data is published
+          to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`.
         :param trigger: :class:`PipelineTrigger` (optional)
           Which pipeline trigger to use. Deprecated: Use `continuous` instead.

@@ -3959,7 +3962,7 @@ class PipelinesAPI:
           Unique identifier for this pipeline.
         :param ingestion_definition: :class:`IngestionPipelineDefinition` (optional)
           The configuration for a managed ingestion pipeline. These settings cannot be used with the
-          'libraries', '
+          'libraries', 'target' or 'catalog' settings.
         :param libraries: List[:class:`PipelineLibrary`] (optional)
          Libraries or code needed by this deployment.
         :param name: str (optional)

@@ -3977,15 +3980,15 @@ class PipelinesAPI:
           Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is
           thrown.
         :param schema: str (optional)
-          The default schema (database) where tables are read from or published to.
+          The default schema (database) where tables are read from or published to. The presence of this field
+          implies that the pipeline is in direct publishing mode.
         :param serverless: bool (optional)
           Whether serverless compute is enabled for this pipeline.
         :param storage: str (optional)
           DBFS root directory for storing checkpoints and tables.
         :param target: str (optional)
-          Target schema (database) to add tables in this pipeline to.
-
-          for pipeline creation in favor of the `schema` field.
+          Target schema (database) to add tables in this pipeline to. If not specified, no data is published
+          to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`.
         :param trigger: :class:`PipelineTrigger` (optional)
           Which pipeline trigger to use. Deprecated: Use `continuous` instead.

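Per the clarified docstrings, setting `schema` puts a pipeline in direct publishing mode, while `target` together with `catalog` publishes to Unity Catalog. A sketch of a create call, with placeholder names and notebook path:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service import pipelines

w = WorkspaceClient()

created = w.pipelines.create(
    name="sales-etl",
    catalog="main",
    schema="sales",  # direct publishing mode: tables are published to main.sales
    serverless=True,
    libraries=[
        pipelines.PipelineLibrary(notebook=pipelines.NotebookLibrary(path="/Repos/etl/sales"))
    ],
)
print(created.pipeline_id)
```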