databricks-sdk 0.45.0__py3-none-any.whl → 0.47.0__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

This version of databricks-sdk might be problematic.

@@ -3,11 +3,15 @@
  from __future__ import annotations

  import logging
+ import random
+ import time
  from dataclasses import dataclass
+ from datetime import timedelta
  from enum import Enum
- from typing import Any, Dict, Iterator, List, Optional
+ from typing import Any, Callable, Dict, Iterator, List, Optional

- from ._internal import _enum, _from_dict, _repeated_dict, _repeated_enum
+ from ..errors import OperationFailed
+ from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum

  _LOG = logging.getLogger("databricks.sdk")

@@ -482,6 +486,184 @@ class CreateExperimentResponse:
  return cls(experiment_id=d.get("experiment_id", None))


+ @dataclass
+ class CreateForecastingExperimentRequest:
+ train_data_path: str
+ """The three-level (fully qualified) name of a unity catalog table. This table serves as the
+ training data for the forecasting model."""
+
+ target_column: str
+ """Name of the column in the input training table that serves as the prediction target. The values
+ in this column will be used as the ground truth for model training."""
+
+ time_column: str
+ """Name of the column in the input training table that represents the timestamp of each row."""
+
+ forecast_granularity: str
+ """The granularity of the forecast. This defines the time interval between consecutive rows in the
+ time series data. Possible values: '1 second', '1 minute', '5 minutes', '10 minutes', '15
+ minutes', '30 minutes', 'Hourly', 'Daily', 'Weekly', 'Monthly', 'Quarterly', 'Yearly'."""
+
+ forecast_horizon: int
+ """The number of time steps into the future for which predictions should be made. This value
+ represents a multiple of forecast_granularity determining how far ahead the model will forecast."""
+
+ custom_weights_column: Optional[str] = None
+ """Name of the column in the input training table used to customize the weight for each time series
+ to calculate weighted metrics."""
+
+ experiment_path: Optional[str] = None
+ """The path to the created experiment. This is the path where the experiment will be stored in the
+ workspace."""
+
+ holiday_regions: Optional[List[str]] = None
+ """Region code(s) to consider when automatically adding holiday features. When empty, no holiday
+ features are added. Only supports 1 holiday region for now."""
+
+ max_runtime: Optional[int] = None
+ """The maximum duration in minutes for which the experiment is allowed to run. If the experiment
+ exceeds this time limit it will be stopped automatically."""
+
+ prediction_data_path: Optional[str] = None
+ """The three-level (fully qualified) path to a unity catalog table. This table path serves to store
+ the predictions."""
+
+ primary_metric: Optional[str] = None
+ """The evaluation metric used to optimize the forecasting model."""
+
+ register_to: Optional[str] = None
+ """The three-level (fully qualified) path to a unity catalog model. This model path serves to store
+ the best model."""
+
+ split_column: Optional[str] = None
+ """Name of the column in the input training table used for custom data splits. The values in this
+ column must be "train", "validate", or "test" to indicate which split each row belongs to."""
+
+ timeseries_identifier_columns: Optional[List[str]] = None
+ """Name of the column in the input training table used to group the dataset to predict individual
+ time series"""
+
+ training_frameworks: Optional[List[str]] = None
+ """The list of frameworks to include for model tuning. Possible values: 'Prophet', 'ARIMA',
+ 'DeepAR'. An empty list will include all supported frameworks."""
+
+ def as_dict(self) -> dict:
+ """Serializes the CreateForecastingExperimentRequest into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.custom_weights_column is not None:
+ body["custom_weights_column"] = self.custom_weights_column
+ if self.experiment_path is not None:
+ body["experiment_path"] = self.experiment_path
+ if self.forecast_granularity is not None:
+ body["forecast_granularity"] = self.forecast_granularity
+ if self.forecast_horizon is not None:
+ body["forecast_horizon"] = self.forecast_horizon
+ if self.holiday_regions:
+ body["holiday_regions"] = [v for v in self.holiday_regions]
+ if self.max_runtime is not None:
+ body["max_runtime"] = self.max_runtime
+ if self.prediction_data_path is not None:
+ body["prediction_data_path"] = self.prediction_data_path
+ if self.primary_metric is not None:
+ body["primary_metric"] = self.primary_metric
+ if self.register_to is not None:
+ body["register_to"] = self.register_to
+ if self.split_column is not None:
+ body["split_column"] = self.split_column
+ if self.target_column is not None:
+ body["target_column"] = self.target_column
+ if self.time_column is not None:
+ body["time_column"] = self.time_column
+ if self.timeseries_identifier_columns:
+ body["timeseries_identifier_columns"] = [v for v in self.timeseries_identifier_columns]
+ if self.train_data_path is not None:
+ body["train_data_path"] = self.train_data_path
+ if self.training_frameworks:
+ body["training_frameworks"] = [v for v in self.training_frameworks]
+ return body
+
+ def as_shallow_dict(self) -> dict:
+ """Serializes the CreateForecastingExperimentRequest into a shallow dictionary of its immediate attributes."""
+ body = {}
+ if self.custom_weights_column is not None:
+ body["custom_weights_column"] = self.custom_weights_column
+ if self.experiment_path is not None:
+ body["experiment_path"] = self.experiment_path
+ if self.forecast_granularity is not None:
+ body["forecast_granularity"] = self.forecast_granularity
+ if self.forecast_horizon is not None:
+ body["forecast_horizon"] = self.forecast_horizon
+ if self.holiday_regions:
+ body["holiday_regions"] = self.holiday_regions
+ if self.max_runtime is not None:
+ body["max_runtime"] = self.max_runtime
+ if self.prediction_data_path is not None:
+ body["prediction_data_path"] = self.prediction_data_path
+ if self.primary_metric is not None:
+ body["primary_metric"] = self.primary_metric
+ if self.register_to is not None:
+ body["register_to"] = self.register_to
+ if self.split_column is not None:
+ body["split_column"] = self.split_column
+ if self.target_column is not None:
+ body["target_column"] = self.target_column
+ if self.time_column is not None:
+ body["time_column"] = self.time_column
+ if self.timeseries_identifier_columns:
+ body["timeseries_identifier_columns"] = self.timeseries_identifier_columns
+ if self.train_data_path is not None:
+ body["train_data_path"] = self.train_data_path
+ if self.training_frameworks:
+ body["training_frameworks"] = self.training_frameworks
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, Any]) -> CreateForecastingExperimentRequest:
+ """Deserializes the CreateForecastingExperimentRequest from a dictionary."""
+ return cls(
+ custom_weights_column=d.get("custom_weights_column", None),
+ experiment_path=d.get("experiment_path", None),
+ forecast_granularity=d.get("forecast_granularity", None),
+ forecast_horizon=d.get("forecast_horizon", None),
+ holiday_regions=d.get("holiday_regions", None),
+ max_runtime=d.get("max_runtime", None),
+ prediction_data_path=d.get("prediction_data_path", None),
+ primary_metric=d.get("primary_metric", None),
+ register_to=d.get("register_to", None),
+ split_column=d.get("split_column", None),
+ target_column=d.get("target_column", None),
+ time_column=d.get("time_column", None),
+ timeseries_identifier_columns=d.get("timeseries_identifier_columns", None),
+ train_data_path=d.get("train_data_path", None),
+ training_frameworks=d.get("training_frameworks", None),
+ )
+
+
+ @dataclass
+ class CreateForecastingExperimentResponse:
+ experiment_id: Optional[str] = None
+ """The unique ID of the created forecasting experiment"""
+
+ def as_dict(self) -> dict:
+ """Serializes the CreateForecastingExperimentResponse into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.experiment_id is not None:
+ body["experiment_id"] = self.experiment_id
+ return body
+
+ def as_shallow_dict(self) -> dict:
+ """Serializes the CreateForecastingExperimentResponse into a shallow dictionary of its immediate attributes."""
+ body = {}
+ if self.experiment_id is not None:
+ body["experiment_id"] = self.experiment_id
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, Any]) -> CreateForecastingExperimentResponse:
+ """Deserializes the CreateForecastingExperimentResponse from a dictionary."""
+ return cls(experiment_id=d.get("experiment_id", None))
+
+
  @dataclass
  class CreateModelRequest:
  name: str
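The new request and response dataclasses above follow the SDK's usual serialization pattern: as_dict() builds the JSON request body, omitting unset fields, and from_dict() reconstructs the object. A minimal round-trip sketch, assuming the classes are exposed from databricks.sdk.service.ml (the module path is not shown in this diff) and using hypothetical table and column names:

    # Round-trip sketch for the new dataclass; the import path and all names are assumptions.
    from databricks.sdk.service.ml import CreateForecastingExperimentRequest

    req = CreateForecastingExperimentRequest(
        train_data_path="main.sales.daily_train",  # hypothetical Unity Catalog table
        target_column="revenue",
        time_column="date",
        forecast_granularity="Daily",
        forecast_horizon=30,
    )
    body = req.as_dict()  # only populated fields end up in the JSON body
    restored = CreateForecastingExperimentRequest.from_dict(body)
    assert restored.forecast_horizon == 30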
@@ -1800,6 +1982,60 @@ class FileInfo:
  return cls(file_size=d.get("file_size", None), is_dir=d.get("is_dir", None), path=d.get("path", None))


+ @dataclass
+ class ForecastingExperiment:
+ """Represents a forecasting experiment with its unique identifier, URL, and state."""
+
+ experiment_id: Optional[str] = None
+ """The unique ID for the forecasting experiment."""
+
+ experiment_page_url: Optional[str] = None
+ """The URL to the forecasting experiment page."""
+
+ state: Optional[ForecastingExperimentState] = None
+ """The current state of the forecasting experiment."""
+
+ def as_dict(self) -> dict:
+ """Serializes the ForecastingExperiment into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.experiment_id is not None:
+ body["experiment_id"] = self.experiment_id
+ if self.experiment_page_url is not None:
+ body["experiment_page_url"] = self.experiment_page_url
+ if self.state is not None:
+ body["state"] = self.state.value
+ return body
+
+ def as_shallow_dict(self) -> dict:
+ """Serializes the ForecastingExperiment into a shallow dictionary of its immediate attributes."""
+ body = {}
+ if self.experiment_id is not None:
+ body["experiment_id"] = self.experiment_id
+ if self.experiment_page_url is not None:
+ body["experiment_page_url"] = self.experiment_page_url
+ if self.state is not None:
+ body["state"] = self.state
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, Any]) -> ForecastingExperiment:
+ """Deserializes the ForecastingExperiment from a dictionary."""
+ return cls(
+ experiment_id=d.get("experiment_id", None),
+ experiment_page_url=d.get("experiment_page_url", None),
+ state=_enum(d, "state", ForecastingExperimentState),
+ )
+
+
+ class ForecastingExperimentState(Enum):
+
+ CANCELLED = "CANCELLED"
+ FAILED = "FAILED"
+ PENDING = "PENDING"
+ RUNNING = "RUNNING"
+ SUCCEEDED = "SUCCEEDED"
+
+
  @dataclass
  class GetExperimentByNameResponse:
  experiment: Optional[Experiment] = None
@@ -6705,6 +6941,219 @@ class ExperimentsAPI:
  return UpdateRunResponse.from_dict(res)


+ class ForecastingAPI:
+ """The Forecasting API allows you to create and get serverless forecasting experiments"""
+
+ def __init__(self, api_client):
+ self._api = api_client
+
+ def wait_get_experiment_forecasting_succeeded(
+ self,
+ experiment_id: str,
+ timeout=timedelta(minutes=120),
+ callback: Optional[Callable[[ForecastingExperiment], None]] = None,
+ ) -> ForecastingExperiment:
+ deadline = time.time() + timeout.total_seconds()
+ target_states = (ForecastingExperimentState.SUCCEEDED,)
+ failure_states = (
+ ForecastingExperimentState.FAILED,
+ ForecastingExperimentState.CANCELLED,
+ )
+ status_message = "polling..."
+ attempt = 1
+ while time.time() < deadline:
+ poll = self.get_experiment(experiment_id=experiment_id)
+ status = poll.state
+ status_message = f"current status: {status}"
+ if status in target_states:
+ return poll
+ if callback:
+ callback(poll)
+ if status in failure_states:
+ msg = f"failed to reach SUCCEEDED, got {status}: {status_message}"
+ raise OperationFailed(msg)
+ prefix = f"experiment_id={experiment_id}"
+ sleep = attempt
+ if sleep > 10:
+ # sleep 10s max per attempt
+ sleep = 10
+ _LOG.debug(f"{prefix}: ({status}) {status_message} (sleeping ~{sleep}s)")
+ time.sleep(sleep + random.random())
+ attempt += 1
+ raise TimeoutError(f"timed out after {timeout}: {status_message}")
+
+ def create_experiment(
+ self,
+ train_data_path: str,
+ target_column: str,
+ time_column: str,
+ forecast_granularity: str,
+ forecast_horizon: int,
+ *,
+ custom_weights_column: Optional[str] = None,
+ experiment_path: Optional[str] = None,
+ holiday_regions: Optional[List[str]] = None,
+ max_runtime: Optional[int] = None,
+ prediction_data_path: Optional[str] = None,
+ primary_metric: Optional[str] = None,
+ register_to: Optional[str] = None,
+ split_column: Optional[str] = None,
+ timeseries_identifier_columns: Optional[List[str]] = None,
+ training_frameworks: Optional[List[str]] = None,
+ ) -> Wait[ForecastingExperiment]:
+ """Create a forecasting experiment.
+
+ Creates a serverless forecasting experiment. Returns the experiment ID.
+
+ :param train_data_path: str
+ The three-level (fully qualified) name of a unity catalog table. This table serves as the training
+ data for the forecasting model.
+ :param target_column: str
+ Name of the column in the input training table that serves as the prediction target. The values in
+ this column will be used as the ground truth for model training.
+ :param time_column: str
+ Name of the column in the input training table that represents the timestamp of each row.
+ :param forecast_granularity: str
+ The granularity of the forecast. This defines the time interval between consecutive rows in the time
+ series data. Possible values: '1 second', '1 minute', '5 minutes', '10 minutes', '15 minutes', '30
+ minutes', 'Hourly', 'Daily', 'Weekly', 'Monthly', 'Quarterly', 'Yearly'.
+ :param forecast_horizon: int
+ The number of time steps into the future for which predictions should be made. This value represents
+ a multiple of forecast_granularity determining how far ahead the model will forecast.
+ :param custom_weights_column: str (optional)
+ Name of the column in the input training table used to customize the weight for each time series to
+ calculate weighted metrics.
+ :param experiment_path: str (optional)
+ The path to the created experiment. This is the path where the experiment will be stored in the
+ workspace.
+ :param holiday_regions: List[str] (optional)
+ Region code(s) to consider when automatically adding holiday features. When empty, no holiday
+ features are added. Only supports 1 holiday region for now.
+ :param max_runtime: int (optional)
+ The maximum duration in minutes for which the experiment is allowed to run. If the experiment
+ exceeds this time limit it will be stopped automatically.
+ :param prediction_data_path: str (optional)
+ The three-level (fully qualified) path to a unity catalog table. This table path serves to store the
+ predictions.
+ :param primary_metric: str (optional)
+ The evaluation metric used to optimize the forecasting model.
+ :param register_to: str (optional)
+ The three-level (fully qualified) path to a unity catalog model. This model path serves to store the
+ best model.
+ :param split_column: str (optional)
+ Name of the column in the input training table used for custom data splits. The values in this
+ column must be "train", "validate", or "test" to indicate which split each row belongs to.
+ :param timeseries_identifier_columns: List[str] (optional)
+ Name of the column in the input training table used to group the dataset to predict individual time
+ series
+ :param training_frameworks: List[str] (optional)
+ The list of frameworks to include for model tuning. Possible values: 'Prophet', 'ARIMA', 'DeepAR'.
+ An empty list will include all supported frameworks.
+
+ :returns:
+ Long-running operation waiter for :class:`ForecastingExperiment`.
+ See :method:wait_get_experiment_forecasting_succeeded for more details.
+ """
+ body = {}
+ if custom_weights_column is not None:
+ body["custom_weights_column"] = custom_weights_column
+ if experiment_path is not None:
+ body["experiment_path"] = experiment_path
+ if forecast_granularity is not None:
+ body["forecast_granularity"] = forecast_granularity
+ if forecast_horizon is not None:
+ body["forecast_horizon"] = forecast_horizon
+ if holiday_regions is not None:
+ body["holiday_regions"] = [v for v in holiday_regions]
+ if max_runtime is not None:
+ body["max_runtime"] = max_runtime
+ if prediction_data_path is not None:
+ body["prediction_data_path"] = prediction_data_path
+ if primary_metric is not None:
+ body["primary_metric"] = primary_metric
+ if register_to is not None:
+ body["register_to"] = register_to
+ if split_column is not None:
+ body["split_column"] = split_column
+ if target_column is not None:
+ body["target_column"] = target_column
+ if time_column is not None:
+ body["time_column"] = time_column
+ if timeseries_identifier_columns is not None:
+ body["timeseries_identifier_columns"] = [v for v in timeseries_identifier_columns]
+ if train_data_path is not None:
+ body["train_data_path"] = train_data_path
+ if training_frameworks is not None:
+ body["training_frameworks"] = [v for v in training_frameworks]
+ headers = {
+ "Accept": "application/json",
+ "Content-Type": "application/json",
+ }
+
+ op_response = self._api.do("POST", "/api/2.0/automl/create-forecasting-experiment", body=body, headers=headers)
+ return Wait(
+ self.wait_get_experiment_forecasting_succeeded,
+ response=CreateForecastingExperimentResponse.from_dict(op_response),
+ experiment_id=op_response["experiment_id"],
+ )
+
+ def create_experiment_and_wait(
+ self,
+ train_data_path: str,
+ target_column: str,
+ time_column: str,
+ forecast_granularity: str,
+ forecast_horizon: int,
+ *,
+ custom_weights_column: Optional[str] = None,
+ experiment_path: Optional[str] = None,
+ holiday_regions: Optional[List[str]] = None,
+ max_runtime: Optional[int] = None,
+ prediction_data_path: Optional[str] = None,
+ primary_metric: Optional[str] = None,
+ register_to: Optional[str] = None,
+ split_column: Optional[str] = None,
+ timeseries_identifier_columns: Optional[List[str]] = None,
+ training_frameworks: Optional[List[str]] = None,
+ timeout=timedelta(minutes=120),
+ ) -> ForecastingExperiment:
+ return self.create_experiment(
+ custom_weights_column=custom_weights_column,
+ experiment_path=experiment_path,
+ forecast_granularity=forecast_granularity,
+ forecast_horizon=forecast_horizon,
+ holiday_regions=holiday_regions,
+ max_runtime=max_runtime,
+ prediction_data_path=prediction_data_path,
+ primary_metric=primary_metric,
+ register_to=register_to,
+ split_column=split_column,
+ target_column=target_column,
+ time_column=time_column,
+ timeseries_identifier_columns=timeseries_identifier_columns,
+ train_data_path=train_data_path,
+ training_frameworks=training_frameworks,
+ ).result(timeout=timeout)
+
+ def get_experiment(self, experiment_id: str) -> ForecastingExperiment:
+ """Get a forecasting experiment.
+
+ Public RPC to get forecasting experiment
+
+ :param experiment_id: str
+ The unique ID of a forecasting experiment
+
+ :returns: :class:`ForecastingExperiment`
+ """
+
+ headers = {
+ "Accept": "application/json",
+ }
+
+ res = self._api.do("GET", f"/api/2.0/automl/get-forecasting-experiment/{experiment_id}", headers=headers)
+ return ForecastingExperiment.from_dict(res)
+
+
  class ModelRegistryAPI:
  """Note: This API reference documents APIs for the Workspace Model Registry. Databricks recommends using
  [Models in Unity Catalog](/api/workspace/registeredmodels) instead. Models in Unity Catalog provides
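Taken together, the new ForecastingAPI follows the SDK's long-running-operation pattern: create_experiment returns a Wait handle whose result() polls wait_get_experiment_forecasting_succeeded, and create_experiment_and_wait wraps the two. A usage sketch, assuming the workspace client exposes this service as w.forecasting (the accessor name is not shown in this diff) and using hypothetical table and column names:

    # Usage sketch; the w.forecasting accessor and all names are assumptions.
    from datetime import timedelta
    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    experiment = w.forecasting.create_experiment_and_wait(
        train_data_path="main.sales.daily_train",
        target_column="revenue",
        time_column="date",
        forecast_granularity="Daily",
        forecast_horizon=30,
        timeout=timedelta(minutes=90),  # raises TimeoutError if not SUCCEEDED in time
    )
    print(experiment.experiment_page_url, experiment.state)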
@@ -69,7 +69,7 @@ class CreatePipeline:

  ingestion_definition: Optional[IngestionPipelineDefinition] = None
  """The configuration for a managed ingestion pipeline. These settings cannot be used with the
- 'libraries', 'target' or 'catalog' settings."""
+ 'libraries', 'schema', 'target', or 'catalog' settings."""

  libraries: Optional[List[PipelineLibrary]] = None
  """Libraries or code needed by this deployment."""
@@ -95,8 +95,7 @@ class CreatePipeline:
  is thrown."""

  schema: Optional[str] = None
- """The default schema (database) where tables are read from or published to. The presence of this
- field implies that the pipeline is in direct publishing mode."""
+ """The default schema (database) where tables are read from or published to."""

  serverless: Optional[bool] = None
  """Whether serverless compute is enabled for this pipeline."""
@@ -105,9 +104,9 @@ class CreatePipeline:
  """DBFS root directory for storing checkpoints and tables."""

  target: Optional[str] = None
- """Target schema (database) to add tables in this pipeline to. If not specified, no data is
- published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify
- `catalog`."""
+ """Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target`
+ must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is
+ deprecated for pipeline creation in favor of the `schema` field."""

  trigger: Optional[PipelineTrigger] = None
  """Which pipeline trigger to use. Deprecated: Use `continuous` instead."""
@@ -443,7 +442,7 @@ class EditPipeline:

  ingestion_definition: Optional[IngestionPipelineDefinition] = None
  """The configuration for a managed ingestion pipeline. These settings cannot be used with the
- 'libraries', 'target' or 'catalog' settings."""
+ 'libraries', 'schema', 'target', or 'catalog' settings."""

  libraries: Optional[List[PipelineLibrary]] = None
  """Libraries or code needed by this deployment."""
@@ -472,8 +471,7 @@ class EditPipeline:
  is thrown."""

  schema: Optional[str] = None
- """The default schema (database) where tables are read from or published to. The presence of this
- field implies that the pipeline is in direct publishing mode."""
+ """The default schema (database) where tables are read from or published to."""

  serverless: Optional[bool] = None
  """Whether serverless compute is enabled for this pipeline."""
@@ -482,9 +480,9 @@ class EditPipeline:
  """DBFS root directory for storing checkpoints and tables."""

  target: Optional[str] = None
- """Target schema (database) to add tables in this pipeline to. If not specified, no data is
- published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify
- `catalog`."""
+ """Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target`
+ must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is
+ deprecated for pipeline creation in favor of the `schema` field."""

  trigger: Optional[PipelineTrigger] = None
  """Which pipeline trigger to use. Deprecated: Use `continuous` instead."""
@@ -2218,7 +2216,7 @@ class PipelineSpec:

  ingestion_definition: Optional[IngestionPipelineDefinition] = None
  """The configuration for a managed ingestion pipeline. These settings cannot be used with the
- 'libraries', 'target' or 'catalog' settings."""
+ 'libraries', 'schema', 'target', or 'catalog' settings."""

  libraries: Optional[List[PipelineLibrary]] = None
  """Libraries or code needed by this deployment."""
@@ -2236,8 +2234,7 @@ class PipelineSpec:
  """Restart window of this pipeline."""

  schema: Optional[str] = None
- """The default schema (database) where tables are read from or published to. The presence of this
- field implies that the pipeline is in direct publishing mode."""
+ """The default schema (database) where tables are read from or published to."""

  serverless: Optional[bool] = None
  """Whether serverless compute is enabled for this pipeline."""
@@ -2246,9 +2243,9 @@ class PipelineSpec:
  """DBFS root directory for storing checkpoints and tables."""

  target: Optional[str] = None
- """Target schema (database) to add tables in this pipeline to. If not specified, no data is
- published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify
- `catalog`."""
+ """Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target`
+ must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is
+ deprecated for pipeline creation in favor of the `schema` field."""

  trigger: Optional[PipelineTrigger] = None
  """Which pipeline trigger to use. Deprecated: Use `continuous` instead."""
@@ -3458,7 +3455,7 @@ class PipelinesAPI:
  Unique identifier for this pipeline.
  :param ingestion_definition: :class:`IngestionPipelineDefinition` (optional)
  The configuration for a managed ingestion pipeline. These settings cannot be used with the
- 'libraries', 'target' or 'catalog' settings.
+ 'libraries', 'schema', 'target', or 'catalog' settings.
  :param libraries: List[:class:`PipelineLibrary`] (optional)
  Libraries or code needed by this deployment.
  :param name: str (optional)
@@ -3476,15 +3473,15 @@ class PipelinesAPI:
  Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is
  thrown.
  :param schema: str (optional)
- The default schema (database) where tables are read from or published to. The presence of this field
- implies that the pipeline is in direct publishing mode.
+ The default schema (database) where tables are read from or published to.
  :param serverless: bool (optional)
  Whether serverless compute is enabled for this pipeline.
  :param storage: str (optional)
  DBFS root directory for storing checkpoints and tables.
  :param target: str (optional)
- Target schema (database) to add tables in this pipeline to. If not specified, no data is published
- to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`.
+ Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must
+ be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated
+ for pipeline creation in favor of the `schema` field.
  :param trigger: :class:`PipelineTrigger` (optional)
  Which pipeline trigger to use. Deprecated: Use `continuous` instead.

@@ -3962,7 +3959,7 @@ class PipelinesAPI:
  Unique identifier for this pipeline.
  :param ingestion_definition: :class:`IngestionPipelineDefinition` (optional)
  The configuration for a managed ingestion pipeline. These settings cannot be used with the
- 'libraries', 'target' or 'catalog' settings.
+ 'libraries', 'schema', 'target', or 'catalog' settings.
  :param libraries: List[:class:`PipelineLibrary`] (optional)
  Libraries or code needed by this deployment.
  :param name: str (optional)
@@ -3980,15 +3977,15 @@ class PipelinesAPI:
  Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is
  thrown.
  :param schema: str (optional)
- The default schema (database) where tables are read from or published to. The presence of this field
- implies that the pipeline is in direct publishing mode.
+ The default schema (database) where tables are read from or published to.
  :param serverless: bool (optional)
  Whether serverless compute is enabled for this pipeline.
  :param storage: str (optional)
  DBFS root directory for storing checkpoints and tables.
  :param target: str (optional)
- Target schema (database) to add tables in this pipeline to. If not specified, no data is published
- to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`.
+ Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must
+ be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated
+ for pipeline creation in favor of the `schema` field.
  :param trigger: :class:`PipelineTrigger` (optional)
  Which pipeline trigger to use. Deprecated: Use `continuous` instead.
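The pipeline documentation changes above all point the same way: `schema` is the preferred field, `target` is a legacy alias that is deprecated for pipeline creation, and exactly one of the two should be set (plus `catalog` when publishing to Unity Catalog). A creation sketch under that convention, assuming w.pipelines.create and the pipelines dataclasses behave as in earlier SDK releases; the notebook path and names are hypothetical:

    # Sketch: prefer `schema` over the deprecated `target`; all names are assumptions.
    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.pipelines import NotebookLibrary, PipelineLibrary

    w = WorkspaceClient()
    created = w.pipelines.create(
        name="sales-dlt",
        catalog="main",   # publish to Unity Catalog
        schema="sales",   # default schema; do not also pass `target`
        serverless=True,
        libraries=[PipelineLibrary(notebook=NotebookLibrary(path="/Repos/me/dlt/sales"))],
    )
    print(created.pipeline_id)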