databricks-sdk 0.48.0__py3-none-any.whl → 0.50.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.


@@ -271,6 +271,109 @@ class ApproveTransitionRequestResponse:
         return cls(activity=_from_dict(d, "activity", Activity))
 
 
+@dataclass
+class ArtifactCredentialInfo:
+    headers: Optional[List[ArtifactCredentialInfoHttpHeader]] = None
+    """A collection of HTTP headers that should be specified when uploading to or downloading from the
+    specified `signed_uri`."""
+
+    path: Optional[str] = None
+    """The path, relative to the Run's artifact root location, of the artifact that can be accessed
+    with the credential."""
+
+    run_id: Optional[str] = None
+    """The ID of the MLflow Run containing the artifact that can be accessed with the credential."""
+
+    signed_uri: Optional[str] = None
+    """The signed URI credential that provides access to the artifact."""
+
+    type: Optional[ArtifactCredentialType] = None
+    """The type of the signed credential URI (e.g., an AWS presigned URL or an Azure Shared Access
+    Signature URI)."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ArtifactCredentialInfo into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.headers:
+            body["headers"] = [v.as_dict() for v in self.headers]
+        if self.path is not None:
+            body["path"] = self.path
+        if self.run_id is not None:
+            body["run_id"] = self.run_id
+        if self.signed_uri is not None:
+            body["signed_uri"] = self.signed_uri
+        if self.type is not None:
+            body["type"] = self.type.value
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the ArtifactCredentialInfo into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.headers:
+            body["headers"] = self.headers
+        if self.path is not None:
+            body["path"] = self.path
+        if self.run_id is not None:
+            body["run_id"] = self.run_id
+        if self.signed_uri is not None:
+            body["signed_uri"] = self.signed_uri
+        if self.type is not None:
+            body["type"] = self.type
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> ArtifactCredentialInfo:
+        """Deserializes the ArtifactCredentialInfo from a dictionary."""
+        return cls(
+            headers=_repeated_dict(d, "headers", ArtifactCredentialInfoHttpHeader),
+            path=d.get("path", None),
+            run_id=d.get("run_id", None),
+            signed_uri=d.get("signed_uri", None),
+            type=_enum(d, "type", ArtifactCredentialType),
+        )
+
+
+@dataclass
+class ArtifactCredentialInfoHttpHeader:
+    name: Optional[str] = None
+    """The HTTP header name."""
+
+    value: Optional[str] = None
+    """The HTTP header value."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ArtifactCredentialInfoHttpHeader into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.name is not None:
+            body["name"] = self.name
+        if self.value is not None:
+            body["value"] = self.value
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the ArtifactCredentialInfoHttpHeader into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.name is not None:
+            body["name"] = self.name
+        if self.value is not None:
+            body["value"] = self.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> ArtifactCredentialInfoHttpHeader:
+        """Deserializes the ArtifactCredentialInfoHttpHeader from a dictionary."""
+        return cls(name=d.get("name", None), value=d.get("value", None))
+
+
+class ArtifactCredentialType(Enum):
+    """The type of a given artifact access credential"""
+
+    AWS_PRESIGNED_URL = "AWS_PRESIGNED_URL"
+    AZURE_ADLS_GEN2_SAS_URI = "AZURE_ADLS_GEN2_SAS_URI"
+    AZURE_SAS_URI = "AZURE_SAS_URI"
+    GCP_SIGNED_URL = "GCP_SIGNED_URL"
+
+
 class CommentActivityAction(Enum):
     """An action that a user (with sufficient permissions) could take on a comment. Valid values are: *
     `EDIT_COMMENT`: Edit the comment
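The new credential classes follow the same `as_dict`/`from_dict` conventions as the rest of the generated model classes. A minimal round-trip sketch, assuming the usual `databricks.sdk.service.ml` import path and a made-up response payload:

```python
from databricks.sdk.service.ml import ArtifactCredentialInfo, ArtifactCredentialType

# Hypothetical payload, shaped like the JSON the API returns.
raw = {
    "run_id": "abc123",
    "path": "traces/tr-1/spans.json",
    "signed_uri": "https://storage.example.com/presigned",  # placeholder URI
    "type": "AWS_PRESIGNED_URL",
    "headers": [{"name": "x-ms-blob-type", "value": "BlockBlob"}],
}

cred = ArtifactCredentialInfo.from_dict(raw)
assert cred.type is ArtifactCredentialType.AWS_PRESIGNED_URL
# as_dict() serializes the nested headers and the enum back to JSON-ready
# values, so the round trip reproduces the original payload.
assert cred.as_dict() == raw
```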
@@ -489,87 +592,83 @@ class CreateExperimentResponse:
 @dataclass
 class CreateForecastingExperimentRequest:
     train_data_path: str
-    """The three-level (fully qualified) name of a unity catalog table. This table serves as the
-    training data for the forecasting model."""
+    """The fully qualified name of a Unity Catalog table, formatted as
+    catalog_name.schema_name.table_name, used as training data for the forecasting model."""
 
     target_column: str
-    """Name of the column in the input training table that serves as the prediction target. The values
-    in this column will be used as the ground truth for model training."""
+    """The column in the input training table used as the prediction target for model training. The
+    values in this column are used as the ground truth for model training."""
 
     time_column: str
-    """Name of the column in the input training table that represents the timestamp of each row."""
+    """The column in the input training table that represents each row's timestamp."""
 
-    data_granularity_unit: str
-    """The time unit of the input data granularity. Together with data_granularity_quantity field, this
-    defines the time interval between consecutive rows in the time series data. Possible values: *
-    'W' (weeks) * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' /
-    'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / 'month' / 'months' * 'Q' /
-    'quarter' / 'quarters' * 'Y' / 'year' / 'years'"""
+    forecast_granularity: str
+    """The time interval between consecutive rows in the time series data. Possible values include: '1
+    second', '1 minute', '5 minutes', '10 minutes', '15 minutes', '30 minutes', 'Hourly', 'Daily',
+    'Weekly', 'Monthly', 'Quarterly', 'Yearly'."""
 
     forecast_horizon: int
-    """The number of time steps into the future for which predictions should be made. This value
-    represents a multiple of data_granularity_unit and data_granularity_quantity determining how far
-    ahead the model will forecast."""
+    """The number of time steps into the future to make predictions, calculated as a multiple of
+    forecast_granularity. This value represents how far ahead the model should forecast."""
 
     custom_weights_column: Optional[str] = None
-    """Name of the column in the input training table used to customize the weight for each time series
-    to calculate weighted metrics."""
-
-    data_granularity_quantity: Optional[int] = None
-    """The quantity of the input data granularity. Together with data_granularity_unit field, this
-    defines the time interval between consecutive rows in the time series data. For now, only 1
-    second, 1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year are supported."""
+    """The column in the training table used to customize weights for each time series."""
 
     experiment_path: Optional[str] = None
-    """The path to the created experiment. This is the path where the experiment will be stored in the
-    workspace."""
+    """The path in the workspace to store the created experiment."""
 
     holiday_regions: Optional[List[str]] = None
-    """Region code(s) to consider when automatically adding holiday features. When empty, no holiday
-    features are added. Only supports 1 holiday region for now."""
+    """The region code(s) to automatically add holiday features. Currently supports only one region."""
+
+    include_features: Optional[List[str]] = None
+    """Specifies the list of feature columns to include in model training. These columns must exist in
+    the training data and be of type string, numerical, or boolean. If not specified, no additional
+    features will be included. Note: Certain columns are automatically handled: - Automatically
+    excluded: split_column, target_column, custom_weights_column. - Automatically included:
+    time_column."""
 
     max_runtime: Optional[int] = None
-    """The maximum duration in minutes for which the experiment is allowed to run. If the experiment
-    exceeds this time limit it will be stopped automatically."""
+    """The maximum duration for the experiment in minutes. The experiment stops automatically if it
+    exceeds this limit."""
 
     prediction_data_path: Optional[str] = None
-    """The three-level (fully qualified) path to a unity catalog table. This table path serves to store
-    the predictions."""
+    """The fully qualified path of a Unity Catalog table, formatted as
+    catalog_name.schema_name.table_name, used to store predictions."""
 
     primary_metric: Optional[str] = None
     """The evaluation metric used to optimize the forecasting model."""
 
     register_to: Optional[str] = None
-    """The three-level (fully qualified) path to a unity catalog model. This model path serves to store
-    the best model."""
+    """The fully qualified path of a Unity Catalog model, formatted as
+    catalog_name.schema_name.model_name, used to store the best model."""
 
     split_column: Optional[str] = None
-    """Name of the column in the input training table used for custom data splits. The values in this
-    column must be "train", "validate", or "test" to indicate which split each row belongs to."""
+    """// The column in the training table used for custom data splits. Values must be 'train',
+    'validate', or 'test'."""
 
     timeseries_identifier_columns: Optional[List[str]] = None
-    """Name of the column in the input training table used to group the dataset to predict individual
-    time series"""
+    """The column in the training table used to group the dataset for predicting individual time
+    series."""
 
     training_frameworks: Optional[List[str]] = None
-    """The list of frameworks to include for model tuning. Possible values: 'Prophet', 'ARIMA',
-    'DeepAR'. An empty list will include all supported frameworks."""
+    """List of frameworks to include for model tuning. Possible values are 'Prophet', 'ARIMA',
+    'DeepAR'. An empty list includes all supported frameworks."""
 
     def as_dict(self) -> dict:
         """Serializes the CreateForecastingExperimentRequest into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.custom_weights_column is not None:
             body["custom_weights_column"] = self.custom_weights_column
-        if self.data_granularity_quantity is not None:
-            body["data_granularity_quantity"] = self.data_granularity_quantity
-        if self.data_granularity_unit is not None:
-            body["data_granularity_unit"] = self.data_granularity_unit
         if self.experiment_path is not None:
             body["experiment_path"] = self.experiment_path
+        if self.forecast_granularity is not None:
+            body["forecast_granularity"] = self.forecast_granularity
         if self.forecast_horizon is not None:
             body["forecast_horizon"] = self.forecast_horizon
         if self.holiday_regions:
             body["holiday_regions"] = [v for v in self.holiday_regions]
+        if self.include_features:
+            body["include_features"] = [v for v in self.include_features]
         if self.max_runtime is not None:
             body["max_runtime"] = self.max_runtime
         if self.prediction_data_path is not None:
@@ -597,16 +696,16 @@ class CreateForecastingExperimentRequest:
         body = {}
         if self.custom_weights_column is not None:
             body["custom_weights_column"] = self.custom_weights_column
-        if self.data_granularity_quantity is not None:
-            body["data_granularity_quantity"] = self.data_granularity_quantity
-        if self.data_granularity_unit is not None:
-            body["data_granularity_unit"] = self.data_granularity_unit
         if self.experiment_path is not None:
             body["experiment_path"] = self.experiment_path
+        if self.forecast_granularity is not None:
+            body["forecast_granularity"] = self.forecast_granularity
         if self.forecast_horizon is not None:
             body["forecast_horizon"] = self.forecast_horizon
         if self.holiday_regions:
             body["holiday_regions"] = self.holiday_regions
+        if self.include_features:
+            body["include_features"] = self.include_features
         if self.max_runtime is not None:
             body["max_runtime"] = self.max_runtime
         if self.prediction_data_path is not None:
@@ -634,11 +733,11 @@ class CreateForecastingExperimentRequest:
         """Deserializes the CreateForecastingExperimentRequest from a dictionary."""
         return cls(
             custom_weights_column=d.get("custom_weights_column", None),
-            data_granularity_quantity=d.get("data_granularity_quantity", None),
-            data_granularity_unit=d.get("data_granularity_unit", None),
             experiment_path=d.get("experiment_path", None),
+            forecast_granularity=d.get("forecast_granularity", None),
             forecast_horizon=d.get("forecast_horizon", None),
             holiday_regions=d.get("holiday_regions", None),
+            include_features=d.get("include_features", None),
             max_runtime=d.get("max_runtime", None),
             prediction_data_path=d.get("prediction_data_path", None),
             primary_metric=d.get("primary_metric", None),
@@ -1959,13 +2058,13 @@ class FileInfo:
     """Metadata of a single artifact file or directory."""
 
     file_size: Optional[int] = None
-    """Size in bytes. Unset for directories."""
+    """The size in bytes of the file. Unset for directories."""
 
     is_dir: Optional[bool] = None
     """Whether the path is a directory."""
 
     path: Optional[str] = None
-    """Path relative to the root artifact directory run."""
+    """The path relative to the root artifact directory run."""
 
     def as_dict(self) -> dict:
         """Serializes the FileInfo into a dictionary suitable for use as a JSON request body."""
@@ -2049,6 +2148,56 @@ class ForecastingExperimentState(Enum):
     SUCCEEDED = "SUCCEEDED"
 
 
+@dataclass
+class GetCredentialsForTraceDataDownloadResponse:
+    credential_info: Optional[ArtifactCredentialInfo] = None
+    """The artifact download credentials for the specified trace data."""
+
+    def as_dict(self) -> dict:
+        """Serializes the GetCredentialsForTraceDataDownloadResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.credential_info:
+            body["credential_info"] = self.credential_info.as_dict()
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the GetCredentialsForTraceDataDownloadResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.credential_info:
+            body["credential_info"] = self.credential_info
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> GetCredentialsForTraceDataDownloadResponse:
+        """Deserializes the GetCredentialsForTraceDataDownloadResponse from a dictionary."""
+        return cls(credential_info=_from_dict(d, "credential_info", ArtifactCredentialInfo))
+
+
+@dataclass
+class GetCredentialsForTraceDataUploadResponse:
+    credential_info: Optional[ArtifactCredentialInfo] = None
+    """The artifact upload credentials for the specified trace data."""
+
+    def as_dict(self) -> dict:
+        """Serializes the GetCredentialsForTraceDataUploadResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.credential_info:
+            body["credential_info"] = self.credential_info.as_dict()
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the GetCredentialsForTraceDataUploadResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.credential_info:
+            body["credential_info"] = self.credential_info
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> GetCredentialsForTraceDataUploadResponse:
+        """Deserializes the GetCredentialsForTraceDataUploadResponse from a dictionary."""
+        return cls(credential_info=_from_dict(d, "credential_info", ArtifactCredentialInfo))
+
+
 @dataclass
 class GetExperimentByNameResponse:
     experiment: Optional[Experiment] = None
@@ -2549,13 +2698,13 @@ class JobSpecWithoutSecret:
 @dataclass
 class ListArtifactsResponse:
     files: Optional[List[FileInfo]] = None
-    """File location and metadata for artifacts."""
+    """The file location and metadata for artifacts."""
 
     next_page_token: Optional[str] = None
-    """Token that can be used to retrieve the next page of artifact results"""
+    """The token that can be used to retrieve the next page of artifact results."""
 
     root_uri: Optional[str] = None
-    """Root artifact directory for the run."""
+    """The root artifact directory for the run."""
 
     def as_dict(self) -> dict:
         """Serializes the ListArtifactsResponse into a dictionary suitable for use as a JSON request body."""
@@ -2797,11 +2946,16 @@ class LogInputs:
     datasets: Optional[List[DatasetInput]] = None
     """Dataset inputs"""
 
+    models: Optional[List[ModelInput]] = None
+    """Model inputs"""
+
     def as_dict(self) -> dict:
         """Serializes the LogInputs into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.datasets:
             body["datasets"] = [v.as_dict() for v in self.datasets]
+        if self.models:
+            body["models"] = [v.as_dict() for v in self.models]
         if self.run_id is not None:
             body["run_id"] = self.run_id
         return body
@@ -2811,6 +2965,8 @@ class LogInputs:
         body = {}
         if self.datasets:
             body["datasets"] = self.datasets
+        if self.models:
+            body["models"] = self.models
         if self.run_id is not None:
             body["run_id"] = self.run_id
         return body
@@ -2818,7 +2974,11 @@ class LogInputs:
     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> LogInputs:
         """Deserializes the LogInputs from a dictionary."""
-        return cls(datasets=_repeated_dict(d, "datasets", DatasetInput), run_id=d.get("run_id", None))
+        return cls(
+            datasets=_repeated_dict(d, "datasets", DatasetInput),
+            models=_repeated_dict(d, "models", ModelInput),
+            run_id=d.get("run_id", None),
+        )
 
 
 @dataclass
@@ -2850,6 +3010,17 @@ class LogMetric:
     timestamp: int
     """Unix timestamp in milliseconds at the time metric was logged."""
 
+    dataset_digest: Optional[str] = None
+    """Dataset digest of the dataset associated with the metric, e.g. an md5 hash of the dataset that
+    uniquely identifies it within datasets of the same name."""
+
+    dataset_name: Optional[str] = None
+    """The name of the dataset associated with the metric. E.g. “my.uc.table@2”
+    “nyc-taxi-dataset”, “fantastic-elk-3”"""
+
+    model_id: Optional[str] = None
+    """ID of the logged model associated with the metric, if applicable"""
+
     run_id: Optional[str] = None
     """ID of the run under which to log the metric. Must be provided."""
 
@@ -2863,8 +3034,14 @@ class LogMetric:
     def as_dict(self) -> dict:
         """Serializes the LogMetric into a dictionary suitable for use as a JSON request body."""
         body = {}
+        if self.dataset_digest is not None:
+            body["dataset_digest"] = self.dataset_digest
+        if self.dataset_name is not None:
+            body["dataset_name"] = self.dataset_name
         if self.key is not None:
             body["key"] = self.key
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
         if self.run_id is not None:
             body["run_id"] = self.run_id
         if self.run_uuid is not None:
@@ -2880,8 +3057,14 @@ class LogMetric:
     def as_shallow_dict(self) -> dict:
         """Serializes the LogMetric into a shallow dictionary of its immediate attributes."""
         body = {}
+        if self.dataset_digest is not None:
+            body["dataset_digest"] = self.dataset_digest
+        if self.dataset_name is not None:
+            body["dataset_name"] = self.dataset_name
         if self.key is not None:
             body["key"] = self.key
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
         if self.run_id is not None:
             body["run_id"] = self.run_id
         if self.run_uuid is not None:
@@ -2898,7 +3081,10 @@ class LogMetric:
     def from_dict(cls, d: Dict[str, Any]) -> LogMetric:
         """Deserializes the LogMetric from a dictionary."""
         return cls(
+            dataset_digest=d.get("dataset_digest", None),
+            dataset_name=d.get("dataset_name", None),
             key=d.get("key", None),
+            model_id=d.get("model_id", None),
             run_id=d.get("run_id", None),
             run_uuid=d.get("run_uuid", None),
             step=d.get("step", None),
@@ -3049,23 +3235,46 @@ class LogParamResponse:
 class Metric:
     """Metric associated with a run, represented as a key-value pair."""
 
+    dataset_digest: Optional[str] = None
+    """The dataset digest of the dataset associated with the metric, e.g. an md5 hash of the dataset
+    that uniquely identifies it within datasets of the same name."""
+
+    dataset_name: Optional[str] = None
+    """The name of the dataset associated with the metric. E.g. “my.uc.table@2”
+    “nyc-taxi-dataset”, “fantastic-elk-3”"""
+
     key: Optional[str] = None
-    """Key identifying this metric."""
+    """The key identifying the metric."""
+
+    model_id: Optional[str] = None
+    """The ID of the logged model or registered model version associated with the metric, if
+    applicable."""
+
+    run_id: Optional[str] = None
+    """The ID of the run containing the metric."""
 
     step: Optional[int] = None
-    """Step at which to log the metric."""
+    """The step at which the metric was logged."""
 
     timestamp: Optional[int] = None
-    """The timestamp at which this metric was recorded."""
+    """The timestamp at which the metric was recorded."""
 
     value: Optional[float] = None
-    """Value associated with this metric."""
+    """The value of the metric."""
 
     def as_dict(self) -> dict:
         """Serializes the Metric into a dictionary suitable for use as a JSON request body."""
         body = {}
+        if self.dataset_digest is not None:
+            body["dataset_digest"] = self.dataset_digest
+        if self.dataset_name is not None:
+            body["dataset_name"] = self.dataset_name
         if self.key is not None:
             body["key"] = self.key
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
+        if self.run_id is not None:
+            body["run_id"] = self.run_id
         if self.step is not None:
             body["step"] = self.step
         if self.timestamp is not None:
@@ -3077,8 +3286,16 @@ class Metric:
     def as_shallow_dict(self) -> dict:
         """Serializes the Metric into a shallow dictionary of its immediate attributes."""
         body = {}
+        if self.dataset_digest is not None:
+            body["dataset_digest"] = self.dataset_digest
+        if self.dataset_name is not None:
+            body["dataset_name"] = self.dataset_name
         if self.key is not None:
             body["key"] = self.key
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
+        if self.run_id is not None:
+            body["run_id"] = self.run_id
         if self.step is not None:
             body["step"] = self.step
         if self.timestamp is not None:
@@ -3091,7 +3308,11 @@ class Metric:
     def from_dict(cls, d: Dict[str, Any]) -> Metric:
         """Deserializes the Metric from a dictionary."""
         return cls(
+            dataset_digest=d.get("dataset_digest", None),
+            dataset_name=d.get("dataset_name", None),
             key=d.get("key", None),
+            model_id=d.get("model_id", None),
+            run_id=d.get("run_id", None),
             step=d.get("step", None),
             timestamp=d.get("timestamp", None),
             value=d.get("value", None),
@@ -3266,6 +3487,33 @@ class ModelDatabricks:
         )
 
 
+@dataclass
+class ModelInput:
+    """Represents a LoggedModel or Registered Model Version input to a Run."""
+
+    model_id: str
+    """The unique identifier of the model."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ModelInput into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the ModelInput into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> ModelInput:
+        """Deserializes the ModelInput from a dictionary."""
+        return cls(model_id=d.get("model_id", None))
+
+
 @dataclass
 class ModelTag:
     key: Optional[str] = None
@@ -4584,11 +4832,19 @@ class RunInputs:
     dataset_inputs: Optional[List[DatasetInput]] = None
     """Run metrics."""
 
+    model_inputs: Optional[List[ModelInput]] = None
+    """**NOTE**: Experimental: This API field may change or be removed in a future release without
+    warning.
+
+    Model inputs to the Run."""
+
     def as_dict(self) -> dict:
         """Serializes the RunInputs into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.dataset_inputs:
             body["dataset_inputs"] = [v.as_dict() for v in self.dataset_inputs]
+        if self.model_inputs:
+            body["model_inputs"] = [v.as_dict() for v in self.model_inputs]
         return body
 
     def as_shallow_dict(self) -> dict:
@@ -4596,12 +4852,17 @@ class RunInputs:
         body = {}
         if self.dataset_inputs:
             body["dataset_inputs"] = self.dataset_inputs
+        if self.model_inputs:
+            body["model_inputs"] = self.model_inputs
         return body
 
     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> RunInputs:
         """Deserializes the RunInputs from a dictionary."""
-        return cls(dataset_inputs=_repeated_dict(d, "dataset_inputs", DatasetInput))
+        return cls(
+            dataset_inputs=_repeated_dict(d, "dataset_inputs", DatasetInput),
+            model_inputs=_repeated_dict(d, "model_inputs", ModelInput),
+        )
 
 
 @dataclass
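Once the backend populates them, model inputs can be read off a fetched run next to `dataset_inputs`. A sketch, assuming the standard `WorkspaceClient` entry point; the run ID is a placeholder:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

run = w.experiments.get_run(run_id="0123456789abcdef").run  # placeholder ID
if run and run.inputs and run.inputs.model_inputs:
    for model_input in run.inputs.model_inputs:
        print(model_input.model_id)
```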
@@ -6114,6 +6375,38 @@ class ExperimentsAPI:
         res = self._api.do("GET", "/api/2.0/mlflow/experiments/get-by-name", query=query, headers=headers)
         return GetExperimentByNameResponse.from_dict(res)
 
+    def get_credentials_for_trace_data_download(self, request_id: str) -> GetCredentialsForTraceDataDownloadResponse:
+        """Get credentials to download trace data.
+
+        :param request_id: str
+          The ID of the trace to fetch artifact download credentials for.
+
+        :returns: :class:`GetCredentialsForTraceDataDownloadResponse`
+        """
+
+        headers = {
+            "Accept": "application/json",
+        }
+
+        res = self._api.do("GET", f"/api/2.0/mlflow/traces/{request_id}/credentials-for-data-download", headers=headers)
+        return GetCredentialsForTraceDataDownloadResponse.from_dict(res)
+
+    def get_credentials_for_trace_data_upload(self, request_id: str) -> GetCredentialsForTraceDataUploadResponse:
+        """Get credentials to upload trace data.
+
+        :param request_id: str
+          The ID of the trace to fetch artifact upload credentials for.
+
+        :returns: :class:`GetCredentialsForTraceDataUploadResponse`
+        """
+
+        headers = {
+            "Accept": "application/json",
+        }
+
+        res = self._api.do("GET", f"/api/2.0/mlflow/traces/{request_id}/credentials-for-data-upload", headers=headers)
+        return GetCredentialsForTraceDataUploadResponse.from_dict(res)
+
     def get_experiment(self, experiment_id: str) -> GetExperimentResponse:
         """Get an experiment.
 
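A usage sketch for the two new trace-credential methods, assuming the generated `ExperimentsAPI` is reachable as `WorkspaceClient.experiments`; the trace ID is a placeholder, and the actual transfer against `signed_uri` is left to whatever HTTP client you prefer:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

resp = w.experiments.get_credentials_for_trace_data_download("tr-1234")  # placeholder trace ID
info = resp.credential_info
if info is not None:
    extra_headers = {h.name: h.value for h in (info.headers or [])}
    # info.signed_uri can now be fetched with any HTTP client, passing
    # extra_headers along, e.g. requests.get(info.signed_uri, headers=extra_headers)
```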
@@ -6270,10 +6563,11 @@ class ExperimentsAPI:
           API](/api/workspace/files/listdirectorycontents).
 
         :param page_token: str (optional)
-          Token indicating the page of artifact results to fetch. `page_token` is not supported when listing
-          artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. Please call
-          `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which supports
-          pagination. See [List directory contents | Files API](/api/workspace/files/listdirectorycontents).
+          The token indicating the page of artifact results to fetch. `page_token` is not supported when
+          listing artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes.
+          Please call `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which
+          supports pagination. See [List directory contents | Files
+          API](/api/workspace/files/listdirectorycontents).
         :param path: str (optional)
           Filter artifacts matching this path (a relative path from the root artifact directory).
         :param run_id: str (optional)
@@ -6431,7 +6725,9 @@ class ExperimentsAPI:
 
         self._api.do("POST", "/api/2.0/mlflow/runs/log-batch", body=body, headers=headers)
 
-    def log_inputs(self, run_id: str, *, datasets: Optional[List[DatasetInput]] = None):
+    def log_inputs(
+        self, run_id: str, *, datasets: Optional[List[DatasetInput]] = None, models: Optional[List[ModelInput]] = None
+    ):
         """Log inputs to a run.
 
         **NOTE:** Experimental: This API may change or be removed in a future release without warning.
@@ -6442,12 +6738,16 @@ class ExperimentsAPI:
           ID of the run to log under
         :param datasets: List[:class:`DatasetInput`] (optional)
           Dataset inputs
+        :param models: List[:class:`ModelInput`] (optional)
+          Model inputs
 
 
         """
         body = {}
         if datasets is not None:
             body["datasets"] = [v.as_dict() for v in datasets]
+        if models is not None:
+            body["models"] = [v.as_dict() for v in models]
         if run_id is not None:
             body["run_id"] = run_id
         headers = {
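A call sketch for the widened `log_inputs` signature; the run and model IDs below are placeholders:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.ml import ModelInput

w = WorkspaceClient()

w.experiments.log_inputs(
    run_id="0123456789abcdef",                     # placeholder run ID
    models=[ModelInput(model_id="m-0123456789")],  # placeholder model ID
)
```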
@@ -6463,6 +6763,9 @@ class ExperimentsAPI:
         value: float,
         timestamp: int,
         *,
+        dataset_digest: Optional[str] = None,
+        dataset_name: Optional[str] = None,
+        model_id: Optional[str] = None,
         run_id: Optional[str] = None,
         run_uuid: Optional[str] = None,
         step: Optional[int] = None,
@@ -6479,6 +6782,14 @@ class ExperimentsAPI:
           Double value of the metric being logged.
         :param timestamp: int
           Unix timestamp in milliseconds at the time metric was logged.
+        :param dataset_digest: str (optional)
+          Dataset digest of the dataset associated with the metric, e.g. an md5 hash of the dataset that
+          uniquely identifies it within datasets of the same name.
+        :param dataset_name: str (optional)
+          The name of the dataset associated with the metric. E.g. “my.uc.table@2” “nyc-taxi-dataset”,
+          “fantastic-elk-3”
+        :param model_id: str (optional)
+          ID of the logged model associated with the metric, if applicable
         :param run_id: str (optional)
           ID of the run under which to log the metric. Must be provided.
         :param run_uuid: str (optional)
@@ -6490,8 +6801,14 @@ class ExperimentsAPI:
 
         """
         body = {}
+        if dataset_digest is not None:
+            body["dataset_digest"] = dataset_digest
+        if dataset_name is not None:
+            body["dataset_name"] = dataset_name
         if key is not None:
             body["key"] = key
+        if model_id is not None:
+            body["model_id"] = model_id
         if run_id is not None:
             body["run_id"] = run_id
         if run_uuid is not None:
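A sketch of `log_metric` with the new optional dataset/model context fields; every identifier below is a placeholder:

```python
import time

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

w.experiments.log_metric(
    key="rmse",
    value=0.73,
    timestamp=int(time.time() * 1000),  # Unix time in milliseconds
    run_id="0123456789abcdef",
    dataset_name="nyc-taxi-dataset",
    dataset_digest="a1b2c3d4",  # e.g. an md5 of the dataset
    model_id="m-0123456789",
)
```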
@@ -7000,13 +7317,13 @@ class ForecastingAPI:
         train_data_path: str,
         target_column: str,
         time_column: str,
-        data_granularity_unit: str,
+        forecast_granularity: str,
         forecast_horizon: int,
         *,
         custom_weights_column: Optional[str] = None,
-        data_granularity_quantity: Optional[int] = None,
         experiment_path: Optional[str] = None,
         holiday_regions: Optional[List[str]] = None,
+        include_features: Optional[List[str]] = None,
         max_runtime: Optional[int] = None,
         prediction_data_path: Optional[str] = None,
         primary_metric: Optional[str] = None,
@@ -7020,56 +7337,50 @@ class ForecastingAPI:
         Creates a serverless forecasting experiment. Returns the experiment ID.
 
         :param train_data_path: str
-          The three-level (fully qualified) name of a unity catalog table. This table serves as the training
-          data for the forecasting model.
+          The fully qualified name of a Unity Catalog table, formatted as catalog_name.schema_name.table_name,
+          used as training data for the forecasting model.
         :param target_column: str
-          Name of the column in the input training table that serves as the prediction target. The values in
-          this column will be used as the ground truth for model training.
+          The column in the input training table used as the prediction target for model training. The values
+          in this column are used as the ground truth for model training.
         :param time_column: str
-          Name of the column in the input training table that represents the timestamp of each row.
-        :param data_granularity_unit: str
-          The time unit of the input data granularity. Together with data_granularity_quantity field, this
-          defines the time interval between consecutive rows in the time series data. Possible values: * 'W'
-          (weeks) * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' / 'minutes'
-          / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / 'month' / 'months' * 'Q' / 'quarter' / 'quarters'
-          * 'Y' / 'year' / 'years'
+          The column in the input training table that represents each row's timestamp.
+        :param forecast_granularity: str
+          The time interval between consecutive rows in the time series data. Possible values include: '1
+          second', '1 minute', '5 minutes', '10 minutes', '15 minutes', '30 minutes', 'Hourly', 'Daily',
+          'Weekly', 'Monthly', 'Quarterly', 'Yearly'.
         :param forecast_horizon: int
-          The number of time steps into the future for which predictions should be made. This value represents
-          a multiple of data_granularity_unit and data_granularity_quantity determining how far ahead the
-          model will forecast.
+          The number of time steps into the future to make predictions, calculated as a multiple of
+          forecast_granularity. This value represents how far ahead the model should forecast.
         :param custom_weights_column: str (optional)
-          Name of the column in the input training table used to customize the weight for each time series to
-          calculate weighted metrics.
-        :param data_granularity_quantity: int (optional)
-          The quantity of the input data granularity. Together with data_granularity_unit field, this defines
-          the time interval between consecutive rows in the time series data. For now, only 1 second,
-          1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year are supported.
+          The column in the training table used to customize weights for each time series.
         :param experiment_path: str (optional)
-          The path to the created experiment. This is the path where the experiment will be stored in the
-          workspace.
+          The path in the workspace to store the created experiment.
         :param holiday_regions: List[str] (optional)
-          Region code(s) to consider when automatically adding holiday features. When empty, no holiday
-          features are added. Only supports 1 holiday region for now.
+          The region code(s) to automatically add holiday features. Currently supports only one region.
+        :param include_features: List[str] (optional)
+          Specifies the list of feature columns to include in model training. These columns must exist in the
+          training data and be of type string, numerical, or boolean. If not specified, no additional features
+          will be included. Note: Certain columns are automatically handled: - Automatically excluded:
+          split_column, target_column, custom_weights_column. - Automatically included: time_column.
         :param max_runtime: int (optional)
-          The maximum duration in minutes for which the experiment is allowed to run. If the experiment
-          exceeds this time limit it will be stopped automatically.
+          The maximum duration for the experiment in minutes. The experiment stops automatically if it exceeds
+          this limit.
         :param prediction_data_path: str (optional)
-          The three-level (fully qualified) path to a unity catalog table. This table path serves to store the
-          predictions.
+          The fully qualified path of a Unity Catalog table, formatted as catalog_name.schema_name.table_name,
+          used to store predictions.
         :param primary_metric: str (optional)
           The evaluation metric used to optimize the forecasting model.
         :param register_to: str (optional)
-          The three-level (fully qualified) path to a unity catalog model. This model path serves to store the
-          best model.
+          The fully qualified path of a Unity Catalog model, formatted as catalog_name.schema_name.model_name,
+          used to store the best model.
         :param split_column: str (optional)
-          Name of the column in the input training table used for custom data splits. The values in this
-          column must be "train", "validate", or "test" to indicate which split each row belongs to.
+          // The column in the training table used for custom data splits. Values must be 'train', 'validate',
+          or 'test'.
        :param timeseries_identifier_columns: List[str] (optional)
-          Name of the column in the input training table used to group the dataset to predict individual time
-          series
+          The column in the training table used to group the dataset for predicting individual time series.
         :param training_frameworks: List[str] (optional)
-          The list of frameworks to include for model tuning. Possible values: 'Prophet', 'ARIMA', 'DeepAR'.
-          An empty list will include all supported frameworks.
+          List of frameworks to include for model tuning. Possible values are 'Prophet', 'ARIMA', 'DeepAR'. An
+          empty list includes all supported frameworks.
 
         :returns:
           Long-running operation waiter for :class:`ForecastingExperiment`.
@@ -7078,16 +7389,16 @@ class ForecastingAPI:
         body = {}
         if custom_weights_column is not None:
             body["custom_weights_column"] = custom_weights_column
-        if data_granularity_quantity is not None:
-            body["data_granularity_quantity"] = data_granularity_quantity
-        if data_granularity_unit is not None:
-            body["data_granularity_unit"] = data_granularity_unit
         if experiment_path is not None:
             body["experiment_path"] = experiment_path
+        if forecast_granularity is not None:
+            body["forecast_granularity"] = forecast_granularity
         if forecast_horizon is not None:
             body["forecast_horizon"] = forecast_horizon
         if holiday_regions is not None:
             body["holiday_regions"] = [v for v in holiday_regions]
+        if include_features is not None:
+            body["include_features"] = [v for v in include_features]
         if max_runtime is not None:
             body["max_runtime"] = max_runtime
         if prediction_data_path is not None:
@@ -7125,13 +7436,13 @@ class ForecastingAPI:
         train_data_path: str,
         target_column: str,
         time_column: str,
-        data_granularity_unit: str,
+        forecast_granularity: str,
         forecast_horizon: int,
         *,
         custom_weights_column: Optional[str] = None,
-        data_granularity_quantity: Optional[int] = None,
         experiment_path: Optional[str] = None,
         holiday_regions: Optional[List[str]] = None,
+        include_features: Optional[List[str]] = None,
         max_runtime: Optional[int] = None,
         prediction_data_path: Optional[str] = None,
         primary_metric: Optional[str] = None,
@@ -7143,11 +7454,11 @@ class ForecastingAPI:
     ) -> ForecastingExperiment:
         return self.create_experiment(
             custom_weights_column=custom_weights_column,
-            data_granularity_quantity=data_granularity_quantity,
-            data_granularity_unit=data_granularity_unit,
             experiment_path=experiment_path,
+            forecast_granularity=forecast_granularity,
             forecast_horizon=forecast_horizon,
             holiday_regions=holiday_regions,
+            include_features=include_features,
             max_runtime=max_runtime,
             prediction_data_path=prediction_data_path,
             primary_metric=primary_metric,
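For migration, the 0.48.0 pair `data_granularity_unit`/`data_granularity_quantity` collapses into the single `forecast_granularity` string, and `include_features` is new. A before/after sketch, assuming the API is exposed as `WorkspaceClient.forecasting`; table, column, and feature names are hypothetical:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# 0.48.0 (no longer accepted):
#   w.forecasting.create_experiment(..., data_granularity_unit="D", ...)
#
# 0.50.0:
waiter = w.forecasting.create_experiment(
    train_data_path="main.demand.sales",  # hypothetical UC table
    target_column="units_sold",
    time_column="date",
    forecast_granularity="Daily",
    forecast_horizon=30,
    include_features=["store_id", "promo_flag"],  # hypothetical columns
)
experiment = waiter.result()  # block until the long-running operation completes
```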