databricks-sdk 0.46.0__py3-none-any.whl → 0.47.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -1082,6 +1082,7 @@ class MessageErrorType(Enum):
     FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION = "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION"
     FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION = "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION"
     FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION = "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION"
+    GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION = "GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION"
     GENERIC_CHAT_COMPLETION_EXCEPTION = "GENERIC_CHAT_COMPLETION_EXCEPTION"
     GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION = "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION"
     GENERIC_SQL_EXEC_API_CALL_EXCEPTION = "GENERIC_SQL_EXEC_API_CALL_EXCEPTION"
@@ -1096,6 +1097,7 @@ class MessageErrorType(Enum):
     MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION = "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION"
     MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION = "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION"
     MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION = "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION"
+    MISSING_SQL_QUERY_EXCEPTION = "MISSING_SQL_QUERY_EXCEPTION"
     NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE = "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE"
     NO_QUERY_TO_VISUALIZE_EXCEPTION = "NO_QUERY_TO_VISUALIZE_EXCEPTION"
     NO_TABLES_TO_QUERY_EXCEPTION = "NO_TABLES_TO_QUERY_EXCEPTION"
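
The MessageErrorType enum (Genie/AI-BI message errors) gains two members for SQL-generation failures. A minimal sketch of reacting to them when reading a Genie message, assuming the enum and the message error field names are importable from databricks.sdk.service.dashboards as in recent releases (the IDs are placeholders):

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.dashboards import MessageErrorType

w = WorkspaceClient()

# Placeholder identifiers; use real space/conversation/message IDs.
message = w.genie.get_message(
    space_id="...", conversation_id="...", message_id="..."
)

if message.error is not None:
    if message.error.type == MessageErrorType.GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION:
        print("Genie generated a SQL query that exceeds the allowed length")
    elif message.error.type == MessageErrorType.MISSING_SQL_QUERY_EXCEPTION:
        print("Genie returned no SQL query for this message")
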
@@ -846,7 +846,7 @@ class ObjectPermissions:
 @dataclass
 class PartialUpdate:
     id: Optional[str] = None
-    """Unique ID for a user in the Databricks workspace."""
+    """Unique ID in the Databricks workspace."""
 
     operations: Optional[List[Patch]] = None
 
@@ -1918,8 +1918,7 @@ class User:
     groups: Optional[List[ComplexValue]] = None
 
     id: Optional[str] = None
-    """Databricks user ID. This is automatically set by Databricks. Any value provided by the client
-    will be ignored."""
+    """Databricks user ID."""
 
     name: Optional[Name] = None
 
@@ -2480,7 +2479,7 @@ class AccountGroupsAPI:
         Partially updates the details of a group.
 
         :param id: str
-          Unique ID for a group in the Databricks account.
+          Unique ID in the Databricks workspace.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].
@@ -2493,7 +2492,6 @@ class AccountGroupsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -2557,7 +2555,6 @@ class AccountGroupsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -2765,7 +2762,7 @@ class AccountServicePrincipalsAPI:
         Partially updates the details of a single service principal in the Databricks account.
 
         :param id: str
-          Unique ID for a service principal in the Databricks account.
+          Unique ID in the Databricks workspace.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].
@@ -2778,7 +2775,6 @@ class AccountServicePrincipalsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -2848,7 +2844,6 @@ class AccountServicePrincipalsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -2912,8 +2907,7 @@ class AccountUsersAPI:
           External ID is not currently supported. It is reserved for future use.
         :param groups: List[:class:`ComplexValue`] (optional)
         :param id: str (optional)
-          Databricks user ID. This is automatically set by Databricks. Any value provided by the client will
-          be ignored.
+          Databricks user ID.
         :param name: :class:`Name` (optional)
         :param roles: List[:class:`ComplexValue`] (optional)
           Corresponds to AWS instance profile/arn role.
@@ -3123,7 +3117,7 @@ class AccountUsersAPI:
         Partially updates a user resource by applying the supplied operations on specific user attributes.
 
         :param id: str
-          Unique ID for a user in the Databricks account.
+          Unique ID in the Databricks workspace.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].
@@ -3136,7 +3130,6 @@ class AccountUsersAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -3164,8 +3157,7 @@ class AccountUsersAPI:
         Replaces a user's information with the data supplied in request.
 
         :param id: str
-          Databricks user ID. This is automatically set by Databricks. Any value provided by the client will
-          be ignored.
+          Databricks user ID.
         :param active: bool (optional)
           If this user is active
         :param display_name: str (optional)
@@ -3215,7 +3207,6 @@ class AccountUsersAPI:
         if user_name is not None:
             body["userName"] = user_name
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -3434,7 +3425,7 @@ class GroupsAPI:
         Partially updates the details of a group.
 
         :param id: str
-          Unique ID for a group in the Databricks workspace.
+          Unique ID in the Databricks workspace.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].
@@ -3447,7 +3438,6 @@ class GroupsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -3509,7 +3499,6 @@ class GroupsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -3922,7 +3911,7 @@ class ServicePrincipalsAPI:
         Partially updates the details of a single service principal in the Databricks workspace.
 
         :param id: str
-          Unique ID for a service principal in the Databricks workspace.
+          Unique ID in the Databricks workspace.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].
@@ -3935,7 +3924,6 @@ class ServicePrincipalsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -4000,7 +3988,6 @@ class ServicePrincipalsAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -4059,8 +4046,7 @@ class UsersAPI:
           External ID is not currently supported. It is reserved for future use.
         :param groups: List[:class:`ComplexValue`] (optional)
         :param id: str (optional)
-          Databricks user ID. This is automatically set by Databricks. Any value provided by the client will
-          be ignored.
+          Databricks user ID.
         :param name: :class:`Name` (optional)
         :param roles: List[:class:`ComplexValue`] (optional)
           Corresponds to AWS instance profile/arn role.
@@ -4294,7 +4280,7 @@ class UsersAPI:
         Partially updates a user resource by applying the supplied operations on specific user attributes.
 
         :param id: str
-          Unique ID for a user in the Databricks workspace.
+          Unique ID in the Databricks workspace.
         :param operations: List[:class:`Patch`] (optional)
         :param schemas: List[:class:`PatchSchema`] (optional)
           The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"].
@@ -4307,7 +4293,6 @@ class UsersAPI:
         if schemas is not None:
             body["schemas"] = [v.value for v in schemas]
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
 
@@ -4356,8 +4341,7 @@ class UsersAPI:
         Replaces a user's information with the data supplied in request.
 
         :param id: str
-          Databricks user ID. This is automatically set by Databricks. Any value provided by the client will
-          be ignored.
+          Databricks user ID.
         :param active: bool (optional)
           If this user is active
         :param display_name: str (optional)
@@ -4407,7 +4391,6 @@ class UsersAPI:
         if user_name is not None:
             body["userName"] = user_name
         headers = {
-            "Accept": "application/json",
             "Content-Type": "application/json",
         }
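
Across the account- and workspace-level SCIM APIs (groups, service principals, users), the docstrings for id are unified and the patch/update requests no longer send an Accept: application/json header; these endpoints return no body, so call sites are unaffected. For reference, a group patch still looks like the sketch below (the group ID and entitlement value are placeholders):

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import iam

w = WorkspaceClient()

# Placeholder group ID; look one up with w.groups.list() in practice.
w.groups.patch(
    id="123456789",
    operations=[
        iam.Patch(
            op=iam.PatchOp.ADD,
            value={"entitlements": [{"value": "workspace-access"}]},
        )
    ],
    schemas=[iam.PatchSchema.URN_IETF_PARAMS_SCIM_API_MESSAGES_2_0_PATCH_OP],
)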
 
@@ -3659,6 +3659,7 @@ class PerformanceTarget(Enum):
     on serverless compute should be. The performance mode on the job or pipeline should map to a
     performance setting that is passed to Cluster Manager (see cluster-common PerformanceTarget)."""
 
+    BALANCED = "BALANCED"
     COST_OPTIMIZED = "COST_OPTIMIZED"
     PERFORMANCE_OPTIMIZED = "PERFORMANCE_OPTIMIZED"
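
PerformanceTarget picks up a BALANCED mode alongside the existing cost- and performance-optimized settings. A small sketch, assuming the enum lives in databricks.sdk.service.jobs as in this release:

from databricks.sdk.service.jobs import PerformanceTarget

# The serverless performance modes available in 0.47.0.
for mode in PerformanceTarget:
    print(mode.name, "=", mode.value)

chosen = PerformanceTarget.BALANCED  # new in 0.47.0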
 
@@ -1192,6 +1192,7 @@ class FileParent:
 class FileParentType(Enum):
 
     LISTING = "LISTING"
+    LISTING_RESOURCE = "LISTING_RESOURCE"
     PROVIDER = "PROVIDER"
 
 
@@ -2452,6 +2453,7 @@ class ListingType(Enum):
 
 class MarketplaceFileType(Enum):
 
+    APP = "APP"
     EMBEDDED_NOTEBOOK = "EMBEDDED_NOTEBOOK"
     PROVIDER_ICON = "PROVIDER_ICON"
 
@@ -499,27 +499,19 @@ class CreateForecastingExperimentRequest:
     time_column: str
     """Name of the column in the input training table that represents the timestamp of each row."""
 
-    data_granularity_unit: str
-    """The time unit of the input data granularity. Together with data_granularity_quantity field, this
-    defines the time interval between consecutive rows in the time series data. Possible values: *
-    'W' (weeks) * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' /
-    'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / 'month' / 'months' * 'Q' /
-    'quarter' / 'quarters' * 'Y' / 'year' / 'years'"""
+    forecast_granularity: str
+    """The granularity of the forecast. This defines the time interval between consecutive rows in the
+    time series data. Possible values: '1 second', '1 minute', '5 minutes', '10 minutes', '15
+    minutes', '30 minutes', 'Hourly', 'Daily', 'Weekly', 'Monthly', 'Quarterly', 'Yearly'."""
 
     forecast_horizon: int
     """The number of time steps into the future for which predictions should be made. This value
-    represents a multiple of data_granularity_unit and data_granularity_quantity determining how far
-    ahead the model will forecast."""
+    represents a multiple of forecast_granularity determining how far ahead the model will forecast."""
 
     custom_weights_column: Optional[str] = None
     """Name of the column in the input training table used to customize the weight for each time series
     to calculate weighted metrics."""
 
-    data_granularity_quantity: Optional[int] = None
-    """The quantity of the input data granularity. Together with data_granularity_unit field, this
-    defines the time interval between consecutive rows in the time series data. For now, only 1
-    second, 1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year are supported."""
-
     experiment_path: Optional[str] = None
     """The path to the created experiment. This is the path where the experiment will be stored in the
     workspace."""
@@ -560,12 +552,10 @@ class CreateForecastingExperimentRequest:
         body = {}
         if self.custom_weights_column is not None:
             body["custom_weights_column"] = self.custom_weights_column
-        if self.data_granularity_quantity is not None:
-            body["data_granularity_quantity"] = self.data_granularity_quantity
-        if self.data_granularity_unit is not None:
-            body["data_granularity_unit"] = self.data_granularity_unit
         if self.experiment_path is not None:
             body["experiment_path"] = self.experiment_path
+        if self.forecast_granularity is not None:
+            body["forecast_granularity"] = self.forecast_granularity
         if self.forecast_horizon is not None:
             body["forecast_horizon"] = self.forecast_horizon
         if self.holiday_regions:
@@ -597,12 +587,10 @@ class CreateForecastingExperimentRequest:
         body = {}
         if self.custom_weights_column is not None:
             body["custom_weights_column"] = self.custom_weights_column
-        if self.data_granularity_quantity is not None:
-            body["data_granularity_quantity"] = self.data_granularity_quantity
-        if self.data_granularity_unit is not None:
-            body["data_granularity_unit"] = self.data_granularity_unit
         if self.experiment_path is not None:
             body["experiment_path"] = self.experiment_path
+        if self.forecast_granularity is not None:
+            body["forecast_granularity"] = self.forecast_granularity
         if self.forecast_horizon is not None:
             body["forecast_horizon"] = self.forecast_horizon
         if self.holiday_regions:
@@ -634,9 +622,8 @@ class CreateForecastingExperimentRequest:
         """Deserializes the CreateForecastingExperimentRequest from a dictionary."""
         return cls(
             custom_weights_column=d.get("custom_weights_column", None),
-            data_granularity_quantity=d.get("data_granularity_quantity", None),
-            data_granularity_unit=d.get("data_granularity_unit", None),
             experiment_path=d.get("experiment_path", None),
+            forecast_granularity=d.get("forecast_granularity", None),
             forecast_horizon=d.get("forecast_horizon", None),
             holiday_regions=d.get("holiday_regions", None),
             max_runtime=d.get("max_runtime", None),
@@ -7000,11 +6987,10 @@ class ForecastingAPI:
         train_data_path: str,
         target_column: str,
         time_column: str,
-        data_granularity_unit: str,
+        forecast_granularity: str,
         forecast_horizon: int,
         *,
         custom_weights_column: Optional[str] = None,
-        data_granularity_quantity: Optional[int] = None,
         experiment_path: Optional[str] = None,
         holiday_regions: Optional[List[str]] = None,
         max_runtime: Optional[int] = None,
@@ -7027,23 +7013,16 @@ class ForecastingAPI:
           this column will be used as the ground truth for model training.
         :param time_column: str
           Name of the column in the input training table that represents the timestamp of each row.
-        :param data_granularity_unit: str
-          The time unit of the input data granularity. Together with data_granularity_quantity field, this
-          defines the time interval between consecutive rows in the time series data. Possible values: * 'W'
-          (weeks) * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' / 'minutes'
-          / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / 'month' / 'months' * 'Q' / 'quarter' / 'quarters'
-          * 'Y' / 'year' / 'years'
+        :param forecast_granularity: str
+          The granularity of the forecast. This defines the time interval between consecutive rows in the time
+          series data. Possible values: '1 second', '1 minute', '5 minutes', '10 minutes', '15 minutes', '30
+          minutes', 'Hourly', 'Daily', 'Weekly', 'Monthly', 'Quarterly', 'Yearly'.
         :param forecast_horizon: int
           The number of time steps into the future for which predictions should be made. This value represents
-          a multiple of data_granularity_unit and data_granularity_quantity determining how far ahead the
-          model will forecast.
+          a multiple of forecast_granularity determining how far ahead the model will forecast.
         :param custom_weights_column: str (optional)
           Name of the column in the input training table used to customize the weight for each time series to
           calculate weighted metrics.
-        :param data_granularity_quantity: int (optional)
-          The quantity of the input data granularity. Together with data_granularity_unit field, this defines
-          the time interval between consecutive rows in the time series data. For now, only 1 second,
-          1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year are supported.
         :param experiment_path: str (optional)
           The path to the created experiment. This is the path where the experiment will be stored in the
           workspace.
@@ -7078,12 +7057,10 @@ class ForecastingAPI:
         body = {}
         if custom_weights_column is not None:
             body["custom_weights_column"] = custom_weights_column
-        if data_granularity_quantity is not None:
-            body["data_granularity_quantity"] = data_granularity_quantity
-        if data_granularity_unit is not None:
-            body["data_granularity_unit"] = data_granularity_unit
         if experiment_path is not None:
             body["experiment_path"] = experiment_path
+        if forecast_granularity is not None:
+            body["forecast_granularity"] = forecast_granularity
         if forecast_horizon is not None:
             body["forecast_horizon"] = forecast_horizon
         if holiday_regions is not None:
@@ -7125,11 +7102,10 @@ class ForecastingAPI:
         train_data_path: str,
         target_column: str,
         time_column: str,
-        data_granularity_unit: str,
+        forecast_granularity: str,
         forecast_horizon: int,
         *,
         custom_weights_column: Optional[str] = None,
-        data_granularity_quantity: Optional[int] = None,
         experiment_path: Optional[str] = None,
         holiday_regions: Optional[List[str]] = None,
         max_runtime: Optional[int] = None,
@@ -7143,9 +7119,8 @@ class ForecastingAPI:
     ) -> ForecastingExperiment:
         return self.create_experiment(
             custom_weights_column=custom_weights_column,
-            data_granularity_quantity=data_granularity_quantity,
-            data_granularity_unit=data_granularity_unit,
             experiment_path=experiment_path,
+            forecast_granularity=forecast_granularity,
             forecast_horizon=forecast_horizon,
             holiday_regions=holiday_regions,
             max_runtime=max_runtime,
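
This is a breaking change for ForecastingAPI.create_experiment: the required data_granularity_unit argument and the optional data_granularity_quantity argument are replaced by a single required forecast_granularity string. A migration sketch, assuming the arguments visible in these hunks are the only required ones (table and column names are placeholders):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# 0.46.0: data_granularity_unit="D" (optionally data_granularity_quantity=1)
# 0.47.0: a single granularity string from the documented set.
response = w.forecasting.create_experiment(
    train_data_path="main.demo.sales_train",   # placeholder Unity Catalog table
    target_column="units_sold",                # placeholder column
    time_column="order_date",                  # placeholder column
    forecast_granularity="Daily",
    forecast_horizon=30,
)
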
@@ -776,6 +776,13 @@ class OidcFederationPolicy:
     endpoint. Databricks strongly recommends relying on your issuer’s well known endpoint for
     discovering public keys."""
 
+    jwks_uri: Optional[str] = None
+    """URL of the public keys used to validate the signature of federated tokens, in JWKS format. Most
+    use cases should not need to specify this field. If jwks_uri and jwks_json are both unspecified
+    (recommended), Databricks automatically fetches the public keys from your issuer’s well known
+    endpoint. Databricks strongly recommends relying on your issuer’s well known endpoint for
+    discovering public keys."""
+
     subject: Optional[str] = None
     """The required token subject, as specified in the subject claim of federated tokens. Must be
     specified for service principal federation policies. Must not be specified for account
@@ -793,6 +800,8 @@ class OidcFederationPolicy:
             body["issuer"] = self.issuer
         if self.jwks_json is not None:
             body["jwks_json"] = self.jwks_json
+        if self.jwks_uri is not None:
+            body["jwks_uri"] = self.jwks_uri
         if self.subject is not None:
             body["subject"] = self.subject
         if self.subject_claim is not None:
@@ -808,6 +817,8 @@ class OidcFederationPolicy:
             body["issuer"] = self.issuer
         if self.jwks_json is not None:
             body["jwks_json"] = self.jwks_json
+        if self.jwks_uri is not None:
+            body["jwks_uri"] = self.jwks_uri
         if self.subject is not None:
             body["subject"] = self.subject
         if self.subject_claim is not None:
@@ -821,6 +832,7 @@ class OidcFederationPolicy:
             audiences=d.get("audiences", None),
             issuer=d.get("issuer", None),
             jwks_json=d.get("jwks_json", None),
+            jwks_uri=d.get("jwks_uri", None),
             subject=d.get("subject", None),
             subject_claim=d.get("subject_claim", None),
         )
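
OidcFederationPolicy gains an optional jwks_uri field as an alternative to jwks_json for pinning signing keys explicitly; leaving both unset remains the recommended default. A construction sketch using only the field names present in this diff (issuer, audience, subject, and URI values are placeholders), assuming the model is exported from databricks.sdk.service.oauth2:

from databricks.sdk.service.oauth2 import OidcFederationPolicy

policy = OidcFederationPolicy(
    issuer="https://token.actions.example.com",            # placeholder issuer
    audiences=["my-databricks-account"],                    # placeholder audience
    subject="repo:my-org/my-repo:ref:refs/heads/main",      # placeholder subject
    jwks_uri="https://token.actions.example.com/.well-known/jwks.json",  # usually left unset
)

# The new field round-trips through as_dict()/from_dict() like the others.
print(policy.as_dict())
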
@@ -69,7 +69,7 @@ class CreatePipeline:
 
     ingestion_definition: Optional[IngestionPipelineDefinition] = None
     """The configuration for a managed ingestion pipeline. These settings cannot be used with the
-    'libraries', 'target' or 'catalog' settings."""
+    'libraries', 'schema', 'target', or 'catalog' settings."""
 
     libraries: Optional[List[PipelineLibrary]] = None
     """Libraries or code needed by this deployment."""
@@ -95,8 +95,7 @@ class CreatePipeline:
     is thrown."""
 
     schema: Optional[str] = None
-    """The default schema (database) where tables are read from or published to. The presence of this
-    field implies that the pipeline is in direct publishing mode."""
+    """The default schema (database) where tables are read from or published to."""
 
     serverless: Optional[bool] = None
     """Whether serverless compute is enabled for this pipeline."""
@@ -105,9 +104,9 @@ class CreatePipeline:
     """DBFS root directory for storing checkpoints and tables."""
 
     target: Optional[str] = None
-    """Target schema (database) to add tables in this pipeline to. If not specified, no data is
-    published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify
-    `catalog`."""
+    """Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target`
+    must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is
+    deprecated for pipeline creation in favor of the `schema` field."""
 
     trigger: Optional[PipelineTrigger] = None
     """Which pipeline trigger to use. Deprecated: Use `continuous` instead."""
@@ -443,7 +442,7 @@ class EditPipeline:
 
     ingestion_definition: Optional[IngestionPipelineDefinition] = None
     """The configuration for a managed ingestion pipeline. These settings cannot be used with the
-    'libraries', 'target' or 'catalog' settings."""
+    'libraries', 'schema', 'target', or 'catalog' settings."""
 
     libraries: Optional[List[PipelineLibrary]] = None
     """Libraries or code needed by this deployment."""
@@ -472,8 +471,7 @@ class EditPipeline:
     is thrown."""
 
     schema: Optional[str] = None
-    """The default schema (database) where tables are read from or published to. The presence of this
-    field implies that the pipeline is in direct publishing mode."""
+    """The default schema (database) where tables are read from or published to."""
 
     serverless: Optional[bool] = None
     """Whether serverless compute is enabled for this pipeline."""
@@ -482,9 +480,9 @@ class EditPipeline:
     """DBFS root directory for storing checkpoints and tables."""
 
     target: Optional[str] = None
-    """Target schema (database) to add tables in this pipeline to. If not specified, no data is
-    published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify
-    `catalog`."""
+    """Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target`
+    must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is
+    deprecated for pipeline creation in favor of the `schema` field."""
 
     trigger: Optional[PipelineTrigger] = None
     """Which pipeline trigger to use. Deprecated: Use `continuous` instead."""
@@ -2218,7 +2216,7 @@ class PipelineSpec:
 
     ingestion_definition: Optional[IngestionPipelineDefinition] = None
     """The configuration for a managed ingestion pipeline. These settings cannot be used with the
-    'libraries', 'target' or 'catalog' settings."""
+    'libraries', 'schema', 'target', or 'catalog' settings."""
 
     libraries: Optional[List[PipelineLibrary]] = None
     """Libraries or code needed by this deployment."""
@@ -2236,8 +2234,7 @@ class PipelineSpec:
     """Restart window of this pipeline."""
 
     schema: Optional[str] = None
-    """The default schema (database) where tables are read from or published to. The presence of this
-    field implies that the pipeline is in direct publishing mode."""
+    """The default schema (database) where tables are read from or published to."""
 
     serverless: Optional[bool] = None
     """Whether serverless compute is enabled for this pipeline."""
@@ -2246,9 +2243,9 @@ class PipelineSpec:
     """DBFS root directory for storing checkpoints and tables."""
 
     target: Optional[str] = None
-    """Target schema (database) to add tables in this pipeline to. If not specified, no data is
-    published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify
-    `catalog`."""
+    """Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target`
+    must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is
+    deprecated for pipeline creation in favor of the `schema` field."""
 
     trigger: Optional[PipelineTrigger] = None
     """Which pipeline trigger to use. Deprecated: Use `continuous` instead."""
@@ -3458,7 +3455,7 @@ class PipelinesAPI:
           Unique identifier for this pipeline.
         :param ingestion_definition: :class:`IngestionPipelineDefinition` (optional)
           The configuration for a managed ingestion pipeline. These settings cannot be used with the
-          'libraries', 'target' or 'catalog' settings.
+          'libraries', 'schema', 'target', or 'catalog' settings.
         :param libraries: List[:class:`PipelineLibrary`] (optional)
           Libraries or code needed by this deployment.
         :param name: str (optional)
@@ -3476,15 +3473,15 @@ class PipelinesAPI:
           Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is
          thrown.
         :param schema: str (optional)
-          The default schema (database) where tables are read from or published to. The presence of this field
-          implies that the pipeline is in direct publishing mode.
+          The default schema (database) where tables are read from or published to.
         :param serverless: bool (optional)
           Whether serverless compute is enabled for this pipeline.
         :param storage: str (optional)
           DBFS root directory for storing checkpoints and tables.
         :param target: str (optional)
-          Target schema (database) to add tables in this pipeline to. If not specified, no data is published
-          to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`.
+          Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must
+          be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated
+          for pipeline creation in favor of the `schema` field.
         :param trigger: :class:`PipelineTrigger` (optional)
           Which pipeline trigger to use. Deprecated: Use `continuous` instead.
 
@@ -3962,7 +3959,7 @@ class PipelinesAPI:
          Unique identifier for this pipeline.
         :param ingestion_definition: :class:`IngestionPipelineDefinition` (optional)
           The configuration for a managed ingestion pipeline. These settings cannot be used with the
-          'libraries', 'target' or 'catalog' settings.
+          'libraries', 'schema', 'target', or 'catalog' settings.
         :param libraries: List[:class:`PipelineLibrary`] (optional)
           Libraries or code needed by this deployment.
         :param name: str (optional)
@@ -3980,15 +3977,15 @@ class PipelinesAPI:
          Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is
          thrown.
         :param schema: str (optional)
-          The default schema (database) where tables are read from or published to. The presence of this field
-          implies that the pipeline is in direct publishing mode.
+          The default schema (database) where tables are read from or published to.
         :param serverless: bool (optional)
           Whether serverless compute is enabled for this pipeline.
         :param storage: str (optional)
           DBFS root directory for storing checkpoints and tables.
         :param target: str (optional)
-          Target schema (database) to add tables in this pipeline to. If not specified, no data is published
-          to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`.
+          Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must
+          be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated
+          for pipeline creation in favor of the `schema` field.
         :param trigger: :class:`PipelineTrigger` (optional)
           Which pipeline trigger to use. Deprecated: Use `continuous` instead.
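
For pipelines, `schema` is now documented as the preferred way to pick the publishing schema, with `target` retained as a deprecated legacy field, and `ingestion_definition` is documented as incompatible with `schema` as well. A creation sketch using the preferred fields (catalog, schema, and notebook path are placeholders):

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import pipelines

w = WorkspaceClient()

created = w.pipelines.create(
    name="sdk-example-pipeline",
    catalog="main",            # placeholder Unity Catalog catalog
    schema="sdk_example",      # preferred over the deprecated `target`
    serverless=True,
    libraries=[
        pipelines.PipelineLibrary(
            notebook=pipelines.NotebookLibrary(path="/Workspace/Users/someone@example.com/dlt_notebook")
        )
    ],
)
print(created.pipeline_id)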