databricks-sdk 0.59.0__py3-none-any.whl → 0.61.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2360,6 +2360,24 @@ class DeleteCredentialResponse:
         return cls()
 
 
+@dataclass
+class DeleteMonitorResponse:
+    def as_dict(self) -> dict:
+        """Serializes the DeleteMonitorResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DeleteMonitorResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DeleteMonitorResponse:
+        """Deserializes the DeleteMonitorResponse from a dictionary."""
+        return cls()
+
+
 @dataclass
 class DeleteRequestExternalLineage:
     source: ExternalLineageObject
@@ -5833,7 +5851,7 @@ class MonitorCronSchedule:
     [examples]: https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html"""
 
     timezone_id: str
-    """The timezone id (e.g., ``"PST"``) in which to evaluate the quartz expression."""
+    """The timezone id (e.g., ``PST``) in which to evaluate the quartz expression."""
 
     pause_status: Optional[MonitorCronSchedulePauseStatus] = None
     """Read only field that indicates whether a schedule is paused or not."""
@@ -5871,16 +5889,21 @@ class MonitorCronSchedule:
 
 
 class MonitorCronSchedulePauseStatus(Enum):
-    """Read only field that indicates whether a schedule is paused or not."""
+    """Source link:
+    https://src.dev.databricks.com/databricks/universe/-/blob/elastic-spark-common/api/messages/schedule.proto
+    Monitoring workflow schedule pause status."""
 
     PAUSED = "PAUSED"
     UNPAUSED = "UNPAUSED"
+    UNSPECIFIED = "UNSPECIFIED"
 
 
 @dataclass
 class MonitorDataClassificationConfig:
+    """Data classification related configuration."""
+
     enabled: Optional[bool] = None
-    """Whether data classification is enabled."""
+    """Whether to enable data classification."""
 
     def as_dict(self) -> dict:
         """Serializes the MonitorDataClassificationConfig into a dictionary suitable for use as a JSON request body."""
@@ -5930,36 +5953,26 @@ class MonitorDestination:
 
 @dataclass
 class MonitorInferenceLog:
+    problem_type: MonitorInferenceLogProblemType
+    """Problem type the model aims to solve."""
+
     timestamp_col: str
-    """Column that contains the timestamps of requests. The column must be one of the following: - A
-    ``TimestampType`` column - A column whose values can be converted to timestamps through the
-    pyspark ``to_timestamp`` [function].
-
-    [function]: https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html"""
+    """Column for the timestamp."""
 
     granularities: List[str]
-    """Granularities for aggregating data into time windows based on their timestamp. Currently the
-    following static granularities are supported: {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``,
-    ``"1 day"``, ``"<n> week(s)"``, ``"1 month"``, ``"1 year"``}."""
-
-    model_id_col: str
-    """Column that contains the id of the model generating the predictions. Metrics will be computed
-    per model id by default, and also across all model ids."""
-
-    problem_type: MonitorInferenceLogProblemType
-    """Problem type the model aims to solve. Determines the type of model-quality metrics that will be
-    computed."""
+    """List of granularities to use when aggregating data into time windows based on their timestamp."""
 
     prediction_col: str
-    """Column that contains the output/prediction from the model."""
+    """Column for the prediction."""
+
+    model_id_col: str
+    """Column for the model identifier."""
 
     label_col: Optional[str] = None
-    """Optional column that contains the ground truth for the prediction."""
+    """Column for the label."""
 
     prediction_proba_col: Optional[str] = None
-    """Optional column that contains the prediction probabilities for each class in a classification
-    problem type. The values in this column should be a map, mapping each class label to the
-    prediction probability for a given sample. The map should be of PySpark MapType()."""
+    """Column for prediction probabilities"""
 
     def as_dict(self) -> dict:
         """Serializes the MonitorInferenceLog into a dictionary suitable for use as a JSON request body."""
@@ -6014,8 +6027,6 @@ class MonitorInferenceLog:
 
 
 class MonitorInferenceLogProblemType(Enum):
-    """Problem type the model aims to solve. Determines the type of model-quality metrics that will be
-    computed."""
 
     PROBLEM_TYPE_CLASSIFICATION = "PROBLEM_TYPE_CLASSIFICATION"
     PROBLEM_TYPE_REGRESSION = "PROBLEM_TYPE_REGRESSION"
@@ -6023,60 +6034,66 @@ class MonitorInferenceLogProblemType(Enum):
 
 @dataclass
 class MonitorInfo:
+    output_schema_name: str
+    """[Create:REQ Update:REQ] Schema where output tables are created. Needs to be in 2-level format
+    {catalog}.{schema}"""
+
     table_name: str
-    """The full name of the table to monitor. Format: __catalog_name__.__schema_name__.__table_name__."""
+    """[Create:ERR Update:IGN] UC table to monitor. Format: `catalog.schema.table_name`"""
 
     status: MonitorInfoStatus
-
-    monitor_version: str
-    """The version of the monitor config (e.g. 1,2,3). If negative, the monitor may be corrupted."""
+    """[Create:ERR Update:IGN] The monitor status."""
 
     profile_metrics_table_name: str
-    """The full name of the profile metrics table. Format:
-    __catalog_name__.__schema_name__.__table_name__."""
+    """[Create:ERR Update:IGN] Table that stores profile metrics data. Format:
+    `catalog.schema.table_name`."""
 
     drift_metrics_table_name: str
-    """The full name of the drift metrics table. Format:
-    __catalog_name__.__schema_name__.__table_name__."""
+    """[Create:ERR Update:IGN] Table that stores drift metrics data. Format:
+    `catalog.schema.table_name`."""
+
+    monitor_version: int
+    """[Create:ERR Update:IGN] Represents the current monitor configuration version in use. The version
+    will be represented in a numeric fashion (1,2,3...). The field has flexibility to take on
+    negative values, which can indicate corrupted monitor_version numbers."""
 
     assets_dir: Optional[str] = None
-    """The directory to store monitoring assets (e.g. dashboard, metric tables)."""
+    """[Create:REQ Update:IGN] Field for specifying the absolute path to a custom directory to store
+    data-monitoring assets. Normally prepopulated to a default user location via UI and Python APIs."""
 
     baseline_table_name: Optional[str] = None
-    """Name of the baseline table from which drift metrics are computed from. Columns in the monitored
-    table should also be present in the baseline table."""
+    """[Create:OPT Update:OPT] Baseline table name. Baseline data is used to compute drift from the
+    data in the monitored `table_name`. The baseline table and the monitored table shall have the
+    same schema."""
 
     custom_metrics: Optional[List[MonitorMetric]] = None
-    """Custom metrics to compute on the monitored table. These can be aggregate metrics, derived
-    metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across
-    time windows)."""
+    """[Create:OPT Update:OPT] Custom metrics."""
 
     dashboard_id: Optional[str] = None
-    """Id of dashboard that visualizes the computed metrics. This can be empty if the monitor is in
-    PENDING state."""
+    """[Create:ERR Update:OPT] Id of dashboard that visualizes the computed metrics. This can be empty
+    if the monitor is in PENDING state."""
 
     data_classification_config: Optional[MonitorDataClassificationConfig] = None
-    """The data classification config for the monitor."""
+    """[Create:OPT Update:OPT] Data classification related config."""
 
     inference_log: Optional[MonitorInferenceLog] = None
-    """Configuration for monitoring inference logs."""
 
     latest_monitor_failure_msg: Optional[str] = None
-    """The latest failure message of the monitor (if any)."""
+    """[Create:ERR Update:IGN] The latest error message for a monitor failure."""
 
     notifications: Optional[MonitorNotifications] = None
-    """The notification settings for the monitor."""
-
-    output_schema_name: Optional[str] = None
-    """Schema where output metric tables are created."""
+    """[Create:OPT Update:OPT] Field for specifying notification settings."""
 
     schedule: Optional[MonitorCronSchedule] = None
-    """The schedule for automatically updating and refreshing metric tables."""
+    """[Create:OPT Update:OPT] The monitor schedule."""
 
     slicing_exprs: Optional[List[str]] = None
-    """List of column expressions to slice data with for targeted analysis. The data is grouped by each
-    expression independently, resulting in a separate slice for each predicate and its complements.
-    For high-cardinality columns, only the top 100 unique values by frequency will generate slices."""
+    """[Create:OPT Update:OPT] List of column expressions to slice data with for targeted analysis. The
+    data is grouped by each expression independently, resulting in a separate slice for each
+    predicate and its complements. For example `slicing_exprs=[“col_1”, “col_2 > 10”]` will
+    generate the following slices: two slices for `col_2 > 10` (True and False), and one slice per
+    unique value in `col1`. For high-cardinality columns, only the top 100 unique values by
+    frequency will generate slices."""
 
     snapshot: Optional[MonitorSnapshot] = None
     """Configuration for monitoring snapshot tables."""
@@ -6192,7 +6209,6 @@ class MonitorInfo:
 
 
 class MonitorInfoStatus(Enum):
-    """The status of the monitor."""
 
     MONITOR_STATUS_ACTIVE = "MONITOR_STATUS_ACTIVE"
     MONITOR_STATUS_DELETE_PENDING = "MONITOR_STATUS_DELETE_PENDING"
@@ -6203,6 +6219,8 @@ class MonitorInfoStatus(Enum):
 
 @dataclass
 class MonitorMetric:
+    """Custom metric definition."""
+
     name: str
     """Name of the metric in the output tables."""
 
@@ -6271,10 +6289,10 @@ class MonitorMetric:
 
 
 class MonitorMetricType(Enum):
-    """Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, ``"CUSTOM_METRIC_TYPE_DERIVED"``, or
-    ``"CUSTOM_METRIC_TYPE_DRIFT"``. The ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and
-    ``"CUSTOM_METRIC_TYPE_DERIVED"`` metrics are computed on a single table, whereas the
-    ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across baseline and input table, or across the
+    """Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or
+    ``\"CUSTOM_METRIC_TYPE_DRIFT\"``. The ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and
+    ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics are computed on a single table, whereas the
+    ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across baseline and input table, or across the
     two consecutive time windows. - CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing
     columns in your table - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate
     metrics - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics"""
@@ -6287,10 +6305,10 @@ class MonitorMetricType(Enum):
 @dataclass
 class MonitorNotifications:
     on_failure: Optional[MonitorDestination] = None
-    """Who to send notifications to on monitor failure."""
+    """Destinations to send notifications on failure/timeout."""
 
     on_new_classification_tag_detected: Optional[MonitorDestination] = None
-    """Who to send notifications to when new data classification tags are detected."""
+    """Destinations to send notifications on new classification tag detected."""
 
     def as_dict(self) -> dict:
         """Serializes the MonitorNotifications into a dictionary suitable for use as a JSON request body."""
@@ -6394,13 +6412,14 @@ class MonitorRefreshInfoState(Enum):
     PENDING = "PENDING"
     RUNNING = "RUNNING"
     SUCCESS = "SUCCESS"
+    UNKNOWN = "UNKNOWN"
 
 
 class MonitorRefreshInfoTrigger(Enum):
-    """The method by which the refresh was triggered."""
 
     MANUAL = "MANUAL"
     SCHEDULE = "SCHEDULE"
+    UNKNOWN_TRIGGER = "UNKNOWN_TRIGGER"
 
 
 @dataclass
@@ -6430,6 +6449,8 @@ class MonitorRefreshListResponse:
 
 @dataclass
 class MonitorSnapshot:
+    """Snapshot analysis configuration"""
+
     def as_dict(self) -> dict:
         """Serializes the MonitorSnapshot into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -6448,17 +6469,15 @@ class MonitorSnapshot:
 
 @dataclass
 class MonitorTimeSeries:
+    """Time series analysis configuration."""
+
     timestamp_col: str
-    """Column that contains the timestamps of requests. The column must be one of the following: - A
-    ``TimestampType`` column - A column whose values can be converted to timestamps through the
-    pyspark ``to_timestamp`` [function].
-
-    [function]: https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html"""
+    """Column for the timestamp."""
 
     granularities: List[str]
     """Granularities for aggregating data into time windows based on their timestamp. Currently the
-    following static granularities are supported: {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``,
-    ``"1 day"``, ``"<n> week(s)"``, ``"1 month"``, ``"1 year"``}."""
+    following static granularities are supported: {``\"5 minutes\"``, ``\"30 minutes\"``, ``\"1
+    hour\"``, ``\"1 day\"``, ``\"\u003cn\u003e week(s)\"``, ``\"1 month\"``, ``\"1 year\"``}."""
 
     def as_dict(self) -> dict:
         """Serializes the MonitorTimeSeries into a dictionary suitable for use as a JSON request body."""
@@ -7360,10 +7379,9 @@ class R2Credentials:
 @dataclass
 class RegenerateDashboardResponse:
     dashboard_id: Optional[str] = None
-    """Id of the regenerated monitoring dashboard."""
 
     parent_folder: Optional[str] = None
-    """The directory where the regenerated dashboard is stored."""
+    """Parent folder is equivalent to {assets_dir}/{tableName}"""
 
     def as_dict(self) -> dict:
         """Serializes the RegenerateDashboardResponse into a dictionary suitable for use as a JSON request body."""
@@ -12159,34 +12177,28 @@ class OnlineTablesAPI:
 
 class QualityMonitorsAPI:
     """A monitor computes and monitors data or model quality metrics for a table over time. It generates metrics
-    tables and a dashboard that you can use to monitor table health and set alerts.
-
-    Most write operations require the user to be the owner of the table (or its parent schema or parent
-    catalog). Viewing the dashboard, computed metrics, or monitor configuration only requires the user to have
-    **SELECT** privileges on the table (along with **USE_SCHEMA** and **USE_CATALOG**)."""
+    tables and a dashboard that you can use to monitor table health and set alerts. Most write operations
+    require the user to be the owner of the table (or its parent schema or parent catalog). Viewing the
+    dashboard, computed metrics, or monitor configuration only requires the user to have **SELECT** privileges
+    on the table (along with **USE_SCHEMA** and **USE_CATALOG**)."""
 
     def __init__(self, api_client):
         self._api = api_client
 
-    def cancel_refresh(self, table_name: str, refresh_id: str):
-        """Cancel an active monitor refresh for the given refresh ID.
-
-        The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
-        table's parent catalog and be an owner of the table's parent schema 3. have the following permissions:
-        - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an
-        owner of the table
-
-        Additionally, the call must be made from the workspace where the monitor was created.
+    def cancel_refresh(self, table_name: str, refresh_id: int):
+        """Cancels an already-initiated refresh job.
 
         :param table_name: str
-          Full name of the table.
-        :param refresh_id: str
-          ID of the refresh.
+          UC table name in format `catalog.schema.table_name`. table_name is case insensitive and spaces are
+          disallowed.
+        :param refresh_id: int
 
 
        """
 
-        headers = {}
+        headers = {
+            "Accept": "application/json",
+        }
 
         self._api.do(
            "POST", f"/api/2.1/unity-catalog/tables/{table_name}/monitor/refreshes/{refresh_id}/cancel", headers=headers
@@ -12195,13 +12207,14 @@ class QualityMonitorsAPI:
     def create(
         self,
         table_name: str,
-        assets_dir: str,
         output_schema_name: str,
+        assets_dir: str,
         *,
         baseline_table_name: Optional[str] = None,
         custom_metrics: Optional[List[MonitorMetric]] = None,
         data_classification_config: Optional[MonitorDataClassificationConfig] = None,
         inference_log: Optional[MonitorInferenceLog] = None,
+        latest_monitor_failure_msg: Optional[str] = None,
         notifications: Optional[MonitorNotifications] = None,
         schedule: Optional[MonitorCronSchedule] = None,
         skip_builtin_dashboard: Optional[bool] = None,
@@ -12221,31 +12234,37 @@ class QualityMonitorsAPI:
         Workspace assets, such as the dashboard, will be created in the workspace where this call was made.
 
         :param table_name: str
-          Full name of the table.
-        :param assets_dir: str
-          The directory to store monitoring assets (e.g. dashboard, metric tables).
+          UC table name in format `catalog.schema.table_name`. This field corresponds to the
+          {full_table_name_arg} arg in the endpoint path.
         :param output_schema_name: str
-          Schema where output metric tables are created.
+          [Create:REQ Update:REQ] Schema where output tables are created. Needs to be in 2-level format
+          {catalog}.{schema}
+        :param assets_dir: str
+          [Create:REQ Update:IGN] Field for specifying the absolute path to a custom directory to store
+          data-monitoring assets. Normally prepopulated to a default user location via UI and Python APIs.
         :param baseline_table_name: str (optional)
-          Name of the baseline table from which drift metrics are computed from. Columns in the monitored
-          table should also be present in the baseline table.
+          [Create:OPT Update:OPT] Baseline table name. Baseline data is used to compute drift from the data in
+          the monitored `table_name`. The baseline table and the monitored table shall have the same schema.
         :param custom_metrics: List[:class:`MonitorMetric`] (optional)
-          Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics
-          (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
+          [Create:OPT Update:OPT] Custom metrics.
         :param data_classification_config: :class:`MonitorDataClassificationConfig` (optional)
-          The data classification config for the monitor.
+          [Create:OPT Update:OPT] Data classification related config.
         :param inference_log: :class:`MonitorInferenceLog` (optional)
-          Configuration for monitoring inference logs.
+        :param latest_monitor_failure_msg: str (optional)
+          [Create:ERR Update:IGN] The latest error message for a monitor failure.
         :param notifications: :class:`MonitorNotifications` (optional)
-          The notification settings for the monitor.
+          [Create:OPT Update:OPT] Field for specifying notification settings.
         :param schedule: :class:`MonitorCronSchedule` (optional)
-          The schedule for automatically updating and refreshing metric tables.
+          [Create:OPT Update:OPT] The monitor schedule.
         :param skip_builtin_dashboard: bool (optional)
           Whether to skip creating a default dashboard summarizing data quality metrics.
         :param slicing_exprs: List[str] (optional)
-          List of column expressions to slice data with for targeted analysis. The data is grouped by each
-          expression independently, resulting in a separate slice for each predicate and its complements. For
-          high-cardinality columns, only the top 100 unique values by frequency will generate slices.
+          [Create:OPT Update:OPT] List of column expressions to slice data with for targeted analysis. The
+          data is grouped by each expression independently, resulting in a separate slice for each predicate
+          and its complements. For example `slicing_exprs=[“col_1”, “col_2 > 10”]` will generate the
+          following slices: two slices for `col_2 > 10` (True and False), and one slice per unique value in
+          `col1`. For high-cardinality columns, only the top 100 unique values by frequency will generate
+          slices.
         :param snapshot: :class:`MonitorSnapshot` (optional)
           Configuration for monitoring snapshot tables.
         :param time_series: :class:`MonitorTimeSeries` (optional)
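Because `output_schema_name` and `assets_dir` swap positions in the `create` signature, keyword arguments are the safer way to call it across both versions. A minimal sketch, assuming a hypothetical table, output schema, and workspace path:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.catalog import MonitorTimeSeries

w = WorkspaceClient()

# Hypothetical table, schema, and assets directory.
info = w.quality_monitors.create(
    table_name="main.sales.transactions",
    output_schema_name="main.monitoring",
    assets_dir="/Workspace/Users/someone@example.com/monitoring",
    time_series=MonitorTimeSeries(timestamp_col="event_ts", granularities=["1 day"]),
)
print(info.status)
```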
@@ -12267,6 +12286,8 @@ class QualityMonitorsAPI:
             body["data_classification_config"] = data_classification_config.as_dict()
         if inference_log is not None:
             body["inference_log"] = inference_log.as_dict()
+        if latest_monitor_failure_msg is not None:
+            body["latest_monitor_failure_msg"] = latest_monitor_failure_msg
         if notifications is not None:
             body["notifications"] = notifications.as_dict()
         if output_schema_name is not None:
@@ -12291,7 +12312,7 @@ class QualityMonitorsAPI:
         res = self._api.do("POST", f"/api/2.1/unity-catalog/tables/{table_name}/monitor", body=body, headers=headers)
         return MonitorInfo.from_dict(res)
 
-    def delete(self, table_name: str):
+    def delete(self, table_name: str) -> DeleteMonitorResponse:
         """Deletes a monitor for the specified table.
 
         The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
@@ -12305,14 +12326,18 @@ class QualityMonitorsAPI:
         be manually cleaned up (if desired).
 
         :param table_name: str
-          Full name of the table.
-
+          UC table name in format `catalog.schema.table_name`. This field corresponds to the
+          {full_table_name_arg} arg in the endpoint path.
 
+        :returns: :class:`DeleteMonitorResponse`
         """
 
-        headers = {}
+        headers = {
+            "Accept": "application/json",
+        }
 
-        self._api.do("DELETE", f"/api/2.1/unity-catalog/tables/{table_name}/monitor", headers=headers)
+        res = self._api.do("DELETE", f"/api/2.1/unity-catalog/tables/{table_name}/monitor", headers=headers)
+        return DeleteMonitorResponse.from_dict(res)
 
     def get(self, table_name: str) -> MonitorInfo:
         """Gets a monitor for the specified table.
@@ -12327,7 +12352,8 @@ class QualityMonitorsAPI:
         workspace than where the monitor was created.
 
         :param table_name: str
-          Full name of the table.
+          UC table name in format `catalog.schema.table_name`. This field corresponds to the
+          {full_table_name_arg} arg in the endpoint path.
 
         :returns: :class:`MonitorInfo`
         """
@@ -12339,7 +12365,7 @@ class QualityMonitorsAPI:
         res = self._api.do("GET", f"/api/2.1/unity-catalog/tables/{table_name}/monitor", headers=headers)
         return MonitorInfo.from_dict(res)
 
-    def get_refresh(self, table_name: str, refresh_id: str) -> MonitorRefreshInfo:
+    def get_refresh(self, table_name: str, refresh_id: int) -> MonitorRefreshInfo:
         """Gets info about a specific monitor refresh using the given refresh ID.
 
         The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the
@@ -12351,7 +12377,7 @@ class QualityMonitorsAPI:
 
         :param table_name: str
           Full name of the table.
-        :param refresh_id: str
+        :param refresh_id: int
           ID of the refresh.
 
         :returns: :class:`MonitorRefreshInfo`
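With `refresh_id` typed as an int and the new `UNKNOWN` state, a polling loop written against 0.61.0 might look like the sketch below. The table name is hypothetical, and the `FAILED`/`CANCELED` members are assumed to exist on `MonitorRefreshInfoState` as in earlier releases:

```python
import time

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.catalog import MonitorRefreshInfoState

w = WorkspaceClient()
table = "main.sales.transactions"  # hypothetical table

refresh = w.quality_monitors.run_refresh(table_name=table)
terminal = {
    MonitorRefreshInfoState.SUCCESS,
    MonitorRefreshInfoState.FAILED,    # assumed to exist, not shown in this diff
    MonitorRefreshInfoState.CANCELED,  # assumed to exist, not shown in this diff
    MonitorRefreshInfoState.UNKNOWN,   # new in 0.61.0
}
while True:
    # refresh_id is an int in 0.61.0
    info = w.quality_monitors.get_refresh(table_name=table, refresh_id=refresh.refresh_id)
    if info.state in terminal:
        break
    time.sleep(30)
```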
@@ -12377,7 +12403,8 @@ class QualityMonitorsAPI:
         Additionally, the call must be made from the workspace where the monitor was created.
 
         :param table_name: str
-          Full name of the table.
+          UC table name in format `catalog.schema.table_name`. table_name is case insensitive and spaces are
+          disallowed.
 
         :returns: :class:`MonitorRefreshListResponse`
         """
@@ -12403,7 +12430,8 @@ class QualityMonitorsAPI:
         regenerated in the assets directory that was specified when the monitor was created.
 
         :param table_name: str
-          Full name of the table.
+          UC table name in format `catalog.schema.table_name`. This field corresponds to the
+          {full_table_name_arg} arg in the endpoint path.
         :param warehouse_id: str (optional)
           Optional argument to specify the warehouse for dashboard regeneration. If not specified, the first
           running warehouse will be used.
@@ -12435,7 +12463,8 @@ class QualityMonitorsAPI:
         Additionally, the call must be made from the workspace where the monitor was created.
 
         :param table_name: str
-          Full name of the table.
+          UC table name in format `catalog.schema.table_name`. table_name is case insensitive and spaces are
+          disallowed.
 
         :returns: :class:`MonitorRefreshInfo`
         """
@@ -12457,6 +12486,7 @@ class QualityMonitorsAPI:
         dashboard_id: Optional[str] = None,
         data_classification_config: Optional[MonitorDataClassificationConfig] = None,
         inference_log: Optional[MonitorInferenceLog] = None,
+        latest_monitor_failure_msg: Optional[str] = None,
         notifications: Optional[MonitorNotifications] = None,
         schedule: Optional[MonitorCronSchedule] = None,
         slicing_exprs: Optional[List[str]] = None,
@@ -12476,30 +12506,35 @@ class QualityMonitorsAPI:
         Certain configuration fields, such as output asset identifiers, cannot be updated.
 
         :param table_name: str
-          Full name of the table.
+          UC table name in format `catalog.schema.table_name`. This field corresponds to the
+          {full_table_name_arg} arg in the endpoint path.
         :param output_schema_name: str
-          Schema where output metric tables are created.
+          [Create:REQ Update:REQ] Schema where output tables are created. Needs to be in 2-level format
+          {catalog}.{schema}
        :param baseline_table_name: str (optional)
-          Name of the baseline table from which drift metrics are computed from. Columns in the monitored
-          table should also be present in the baseline table.
+          [Create:OPT Update:OPT] Baseline table name. Baseline data is used to compute drift from the data in
+          the monitored `table_name`. The baseline table and the monitored table shall have the same schema.
         :param custom_metrics: List[:class:`MonitorMetric`] (optional)
-          Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics
-          (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
+          [Create:OPT Update:OPT] Custom metrics.
         :param dashboard_id: str (optional)
-          Id of dashboard that visualizes the computed metrics. This can be empty if the monitor is in PENDING
-          state.
+          [Create:ERR Update:OPT] Id of dashboard that visualizes the computed metrics. This can be empty if
+          the monitor is in PENDING state.
         :param data_classification_config: :class:`MonitorDataClassificationConfig` (optional)
-          The data classification config for the monitor.
+          [Create:OPT Update:OPT] Data classification related config.
         :param inference_log: :class:`MonitorInferenceLog` (optional)
-          Configuration for monitoring inference logs.
+        :param latest_monitor_failure_msg: str (optional)
+          [Create:ERR Update:IGN] The latest error message for a monitor failure.
         :param notifications: :class:`MonitorNotifications` (optional)
-          The notification settings for the monitor.
+          [Create:OPT Update:OPT] Field for specifying notification settings.
         :param schedule: :class:`MonitorCronSchedule` (optional)
-          The schedule for automatically updating and refreshing metric tables.
+          [Create:OPT Update:OPT] The monitor schedule.
         :param slicing_exprs: List[str] (optional)
-          List of column expressions to slice data with for targeted analysis. The data is grouped by each
-          expression independently, resulting in a separate slice for each predicate and its complements. For
-          high-cardinality columns, only the top 100 unique values by frequency will generate slices.
+          [Create:OPT Update:OPT] List of column expressions to slice data with for targeted analysis. The
+          data is grouped by each expression independently, resulting in a separate slice for each predicate
+          and its complements. For example `slicing_exprs=[“col_1”, “col_2 > 10”]` will generate the
+          following slices: two slices for `col_2 > 10` (True and False), and one slice per unique value in
+          `col1`. For high-cardinality columns, only the top 100 unique values by frequency will generate
+          slices.
         :param snapshot: :class:`MonitorSnapshot` (optional)
           Configuration for monitoring snapshot tables.
         :param time_series: :class:`MonitorTimeSeries` (optional)
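The `update` call keeps the same shape but documents the slicing expression semantics more explicitly, and now accepts `latest_monitor_failure_msg`. A minimal sketch, assuming hypothetical table, schema, and column names:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Hypothetical table, schema, and columns.
w.quality_monitors.update(
    table_name="main.sales.transactions",
    output_schema_name="main.monitoring",
    slicing_exprs=["col_1", "col_2 > 10"],
)
```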
@@ -12518,6 +12553,8 @@ class QualityMonitorsAPI:
             body["data_classification_config"] = data_classification_config.as_dict()
         if inference_log is not None:
             body["inference_log"] = inference_log.as_dict()
+        if latest_monitor_failure_msg is not None:
+            body["latest_monitor_failure_msg"] = latest_monitor_failure_msg
         if notifications is not None:
             body["notifications"] = notifications.as_dict()
         if output_schema_name is not None: