databricks-sdk 0.49.0__py3-none-any.whl → 0.50.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- databricks/sdk/__init__.py +5 -3
- databricks/sdk/service/billing.py +9 -0
- databricks/sdk/service/catalog.py +22 -0
- databricks/sdk/service/compute.py +20 -1
- databricks/sdk/service/dashboards.py +244 -28
- databricks/sdk/service/jobs.py +539 -75
- databricks/sdk/service/ml.py +408 -72
- databricks/sdk/service/pipelines.py +0 -32
- databricks/sdk/service/serving.py +15 -12
- databricks/sdk/service/settings.py +472 -4
- databricks/sdk/service/sql.py +11 -0
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.50.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.50.0.dist-info}/RECORD +18 -18
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.50.0.dist-info}/WHEEL +0 -0
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.50.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.50.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.49.0.dist-info → databricks_sdk-0.50.0.dist-info}/top_level.txt +0 -0
databricks/sdk/service/ml.py
CHANGED
@@ -271,6 +271,109 @@ class ApproveTransitionRequestResponse:
         return cls(activity=_from_dict(d, "activity", Activity))


+@dataclass
+class ArtifactCredentialInfo:
+    headers: Optional[List[ArtifactCredentialInfoHttpHeader]] = None
+    """A collection of HTTP headers that should be specified when uploading to or downloading from the
+    specified `signed_uri`."""
+
+    path: Optional[str] = None
+    """The path, relative to the Run's artifact root location, of the artifact that can be accessed
+    with the credential."""
+
+    run_id: Optional[str] = None
+    """The ID of the MLflow Run containing the artifact that can be accessed with the credential."""
+
+    signed_uri: Optional[str] = None
+    """The signed URI credential that provides access to the artifact."""
+
+    type: Optional[ArtifactCredentialType] = None
+    """The type of the signed credential URI (e.g., an AWS presigned URL or an Azure Shared Access
+    Signature URI)."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ArtifactCredentialInfo into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.headers:
+            body["headers"] = [v.as_dict() for v in self.headers]
+        if self.path is not None:
+            body["path"] = self.path
+        if self.run_id is not None:
+            body["run_id"] = self.run_id
+        if self.signed_uri is not None:
+            body["signed_uri"] = self.signed_uri
+        if self.type is not None:
+            body["type"] = self.type.value
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the ArtifactCredentialInfo into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.headers:
+            body["headers"] = self.headers
+        if self.path is not None:
+            body["path"] = self.path
+        if self.run_id is not None:
+            body["run_id"] = self.run_id
+        if self.signed_uri is not None:
+            body["signed_uri"] = self.signed_uri
+        if self.type is not None:
+            body["type"] = self.type
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> ArtifactCredentialInfo:
+        """Deserializes the ArtifactCredentialInfo from a dictionary."""
+        return cls(
+            headers=_repeated_dict(d, "headers", ArtifactCredentialInfoHttpHeader),
+            path=d.get("path", None),
+            run_id=d.get("run_id", None),
+            signed_uri=d.get("signed_uri", None),
+            type=_enum(d, "type", ArtifactCredentialType),
+        )
+
+
+@dataclass
+class ArtifactCredentialInfoHttpHeader:
+    name: Optional[str] = None
+    """The HTTP header name."""
+
+    value: Optional[str] = None
+    """The HTTP header value."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ArtifactCredentialInfoHttpHeader into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.name is not None:
+            body["name"] = self.name
+        if self.value is not None:
+            body["value"] = self.value
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the ArtifactCredentialInfoHttpHeader into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.name is not None:
+            body["name"] = self.name
+        if self.value is not None:
+            body["value"] = self.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> ArtifactCredentialInfoHttpHeader:
+        """Deserializes the ArtifactCredentialInfoHttpHeader from a dictionary."""
+        return cls(name=d.get("name", None), value=d.get("value", None))
+
+
+class ArtifactCredentialType(Enum):
+    """The type of a given artifact access credential"""
+
+    AWS_PRESIGNED_URL = "AWS_PRESIGNED_URL"
+    AZURE_ADLS_GEN2_SAS_URI = "AZURE_ADLS_GEN2_SAS_URI"
+    AZURE_SAS_URI = "AZURE_SAS_URI"
+    GCP_SIGNED_URL = "GCP_SIGNED_URL"
+
+
 class CommentActivityAction(Enum):
     """An action that a user (with sufficient permissions) could take on a comment. Valid values are: *
     `EDIT_COMMENT`: Edit the comment
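Editor's sketch (not part of the diff): the new credential classes round-trip through from_dict/as_dict, with the enum restored to its wire value on serialization. The payload below is a made-up placeholder, not a real credential.

from databricks.sdk.service.ml import ArtifactCredentialInfo, ArtifactCredentialType

# Placeholder payload shaped like the REST response the new classes model.
raw = {
    "run_id": "abc123",
    "path": "traces/trace.json",
    "signed_uri": "https://example.com/signed",
    "type": "AWS_PRESIGNED_URL",
    "headers": [{"name": "x-ms-blob-type", "value": "BlockBlob"}],
}

cred = ArtifactCredentialInfo.from_dict(raw)
assert cred.type is ArtifactCredentialType.AWS_PRESIGNED_URL
# as_dict() converts the enum back to its string value, restoring the input.
assert cred.as_dict() == raw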
@@ -489,63 +592,67 @@ class CreateExperimentResponse:
 @dataclass
 class CreateForecastingExperimentRequest:
     train_data_path: str
-    """The
-    training data for the forecasting model."""
+    """The fully qualified name of a Unity Catalog table, formatted as
+    catalog_name.schema_name.table_name, used as training data for the forecasting model."""

     target_column: str
-    """
-    in this column
+    """The column in the input training table used as the prediction target for model training. The
+    values in this column are used as the ground truth for model training."""

     time_column: str
-    """
+    """The column in the input training table that represents each row's timestamp."""

     forecast_granularity: str
-    """The
-
-
+    """The time interval between consecutive rows in the time series data. Possible values include: '1
+    second', '1 minute', '5 minutes', '10 minutes', '15 minutes', '30 minutes', 'Hourly', 'Daily',
+    'Weekly', 'Monthly', 'Quarterly', 'Yearly'."""

     forecast_horizon: int
-    """The number of time steps into the future
-
+    """The number of time steps into the future to make predictions, calculated as a multiple of
+    forecast_granularity. This value represents how far ahead the model should forecast."""

     custom_weights_column: Optional[str] = None
-    """
-    to calculate weighted metrics."""
+    """The column in the training table used to customize weights for each time series."""

     experiment_path: Optional[str] = None
-    """The path
-    workspace."""
+    """The path in the workspace to store the created experiment."""

     holiday_regions: Optional[List[str]] = None
-    """
-
+    """The region code(s) to automatically add holiday features. Currently supports only one region."""
+
+    include_features: Optional[List[str]] = None
+    """Specifies the list of feature columns to include in model training. These columns must exist in
+    the training data and be of type string, numerical, or boolean. If not specified, no additional
+    features will be included. Note: Certain columns are automatically handled: - Automatically
+    excluded: split_column, target_column, custom_weights_column. - Automatically included:
+    time_column."""

     max_runtime: Optional[int] = None
-    """The maximum duration
-    exceeds this
+    """The maximum duration for the experiment in minutes. The experiment stops automatically if it
+    exceeds this limit."""

     prediction_data_path: Optional[str] = None
-    """The
-
+    """The fully qualified path of a Unity Catalog table, formatted as
+    catalog_name.schema_name.table_name, used to store predictions."""

     primary_metric: Optional[str] = None
     """The evaluation metric used to optimize the forecasting model."""

     register_to: Optional[str] = None
-    """The
-    the best model."""
+    """The fully qualified path of a Unity Catalog model, formatted as
+    catalog_name.schema_name.model_name, used to store the best model."""

     split_column: Optional[str] = None
-    """
-
+    """// The column in the training table used for custom data splits. Values must be 'train',
+    'validate', or 'test'."""

     timeseries_identifier_columns: Optional[List[str]] = None
-    """
-
+    """The column in the training table used to group the dataset for predicting individual time
+    series."""

     training_frameworks: Optional[List[str]] = None
-    """
-    'DeepAR'. An empty list
+    """List of frameworks to include for model tuning. Possible values are 'Prophet', 'ARIMA',
+    'DeepAR'. An empty list includes all supported frameworks."""

     def as_dict(self) -> dict:
         """Serializes the CreateForecastingExperimentRequest into a dictionary suitable for use as a JSON request body."""
@@ -560,6 +667,8 @@ class CreateForecastingExperimentRequest:
             body["forecast_horizon"] = self.forecast_horizon
         if self.holiday_regions:
             body["holiday_regions"] = [v for v in self.holiday_regions]
+        if self.include_features:
+            body["include_features"] = [v for v in self.include_features]
         if self.max_runtime is not None:
             body["max_runtime"] = self.max_runtime
         if self.prediction_data_path is not None:
@@ -595,6 +704,8 @@ class CreateForecastingExperimentRequest:
             body["forecast_horizon"] = self.forecast_horizon
         if self.holiday_regions:
             body["holiday_regions"] = self.holiday_regions
+        if self.include_features:
+            body["include_features"] = self.include_features
         if self.max_runtime is not None:
             body["max_runtime"] = self.max_runtime
         if self.prediction_data_path is not None:
@@ -626,6 +737,7 @@ class CreateForecastingExperimentRequest:
             forecast_granularity=d.get("forecast_granularity", None),
             forecast_horizon=d.get("forecast_horizon", None),
             holiday_regions=d.get("holiday_regions", None),
+            include_features=d.get("include_features", None),
             max_runtime=d.get("max_runtime", None),
             prediction_data_path=d.get("prediction_data_path", None),
             primary_metric=d.get("primary_metric", None),
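Editor's sketch (not part of the diff): constructing the request with the new include_features field, per the rewritten docstrings above. The Unity Catalog table and column names are hypothetical.

from databricks.sdk.service.ml import CreateForecastingExperimentRequest

req = CreateForecastingExperimentRequest(
    train_data_path="main.forecasting.sales_history",  # hypothetical UC table
    target_column="units_sold",
    time_column="sale_date",
    forecast_granularity="Daily",
    forecast_horizon=30,
    include_features=["store_id", "is_promo"],  # new in 0.50.0
)
assert req.as_dict()["include_features"] == ["store_id", "is_promo"]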
@@ -1946,13 +2058,13 @@ class FileInfo:
     """Metadata of a single artifact file or directory."""

     file_size: Optional[int] = None
-    """
+    """The size in bytes of the file. Unset for directories."""

     is_dir: Optional[bool] = None
     """Whether the path is a directory."""

     path: Optional[str] = None
-    """
+    """The path relative to the root artifact directory run."""

     def as_dict(self) -> dict:
         """Serializes the FileInfo into a dictionary suitable for use as a JSON request body."""
@@ -2036,6 +2148,56 @@ class ForecastingExperimentState(Enum):
     SUCCEEDED = "SUCCEEDED"


+@dataclass
+class GetCredentialsForTraceDataDownloadResponse:
+    credential_info: Optional[ArtifactCredentialInfo] = None
+    """The artifact download credentials for the specified trace data."""
+
+    def as_dict(self) -> dict:
+        """Serializes the GetCredentialsForTraceDataDownloadResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.credential_info:
+            body["credential_info"] = self.credential_info.as_dict()
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the GetCredentialsForTraceDataDownloadResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.credential_info:
+            body["credential_info"] = self.credential_info
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> GetCredentialsForTraceDataDownloadResponse:
+        """Deserializes the GetCredentialsForTraceDataDownloadResponse from a dictionary."""
+        return cls(credential_info=_from_dict(d, "credential_info", ArtifactCredentialInfo))
+
+
+@dataclass
+class GetCredentialsForTraceDataUploadResponse:
+    credential_info: Optional[ArtifactCredentialInfo] = None
+    """The artifact upload credentials for the specified trace data."""
+
+    def as_dict(self) -> dict:
+        """Serializes the GetCredentialsForTraceDataUploadResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.credential_info:
+            body["credential_info"] = self.credential_info.as_dict()
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the GetCredentialsForTraceDataUploadResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.credential_info:
+            body["credential_info"] = self.credential_info
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> GetCredentialsForTraceDataUploadResponse:
+        """Deserializes the GetCredentialsForTraceDataUploadResponse from a dictionary."""
+        return cls(credential_info=_from_dict(d, "credential_info", ArtifactCredentialInfo))
+
+
 @dataclass
 class GetExperimentByNameResponse:
     experiment: Optional[Experiment] = None
@@ -2536,13 +2698,13 @@ class JobSpecWithoutSecret:
 @dataclass
 class ListArtifactsResponse:
     files: Optional[List[FileInfo]] = None
-    """
+    """The file location and metadata for artifacts."""

     next_page_token: Optional[str] = None
-    """
+    """The token that can be used to retrieve the next page of artifact results."""

     root_uri: Optional[str] = None
-    """
+    """The root artifact directory for the run."""

     def as_dict(self) -> dict:
         """Serializes the ListArtifactsResponse into a dictionary suitable for use as a JSON request body."""
@@ -2784,11 +2946,16 @@ class LogInputs:
     datasets: Optional[List[DatasetInput]] = None
     """Dataset inputs"""

+    models: Optional[List[ModelInput]] = None
+    """Model inputs"""
+
     def as_dict(self) -> dict:
         """Serializes the LogInputs into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.datasets:
             body["datasets"] = [v.as_dict() for v in self.datasets]
+        if self.models:
+            body["models"] = [v.as_dict() for v in self.models]
         if self.run_id is not None:
             body["run_id"] = self.run_id
         return body
@@ -2798,6 +2965,8 @@ class LogInputs:
         body = {}
         if self.datasets:
             body["datasets"] = self.datasets
+        if self.models:
+            body["models"] = self.models
         if self.run_id is not None:
             body["run_id"] = self.run_id
         return body
@@ -2805,7 +2974,11 @@ class LogInputs:
     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> LogInputs:
         """Deserializes the LogInputs from a dictionary."""
-        return cls(
+        return cls(
+            datasets=_repeated_dict(d, "datasets", DatasetInput),
+            models=_repeated_dict(d, "models", ModelInput),
+            run_id=d.get("run_id", None),
+        )


 @dataclass
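Editor's sketch (not part of the diff): the extended LogInputs body with a model input. The run and model IDs are placeholders.

from databricks.sdk.service.ml import LogInputs, ModelInput

payload = LogInputs(
    run_id="1a2b3c4d",
    models=[ModelInput(model_id="m-0123456789")],
).as_dict()
# datasets was omitted, so only the models list and run ID are serialized.
assert payload == {"models": [{"model_id": "m-0123456789"}], "run_id": "1a2b3c4d"}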
@@ -2837,6 +3010,17 @@ class LogMetric:
     timestamp: int
     """Unix timestamp in milliseconds at the time metric was logged."""

+    dataset_digest: Optional[str] = None
+    """Dataset digest of the dataset associated with the metric, e.g. an md5 hash of the dataset that
+    uniquely identifies it within datasets of the same name."""
+
+    dataset_name: Optional[str] = None
+    """The name of the dataset associated with the metric. E.g. “my.uc.table@2”
+    “nyc-taxi-dataset”, “fantastic-elk-3”"""
+
+    model_id: Optional[str] = None
+    """ID of the logged model associated with the metric, if applicable"""
+
     run_id: Optional[str] = None
     """ID of the run under which to log the metric. Must be provided."""

@@ -2850,8 +3034,14 @@ class LogMetric:
     def as_dict(self) -> dict:
         """Serializes the LogMetric into a dictionary suitable for use as a JSON request body."""
         body = {}
+        if self.dataset_digest is not None:
+            body["dataset_digest"] = self.dataset_digest
+        if self.dataset_name is not None:
+            body["dataset_name"] = self.dataset_name
         if self.key is not None:
             body["key"] = self.key
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
         if self.run_id is not None:
             body["run_id"] = self.run_id
         if self.run_uuid is not None:
@@ -2867,8 +3057,14 @@ class LogMetric:
     def as_shallow_dict(self) -> dict:
         """Serializes the LogMetric into a shallow dictionary of its immediate attributes."""
         body = {}
+        if self.dataset_digest is not None:
+            body["dataset_digest"] = self.dataset_digest
+        if self.dataset_name is not None:
+            body["dataset_name"] = self.dataset_name
         if self.key is not None:
             body["key"] = self.key
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
         if self.run_id is not None:
             body["run_id"] = self.run_id
         if self.run_uuid is not None:
@@ -2885,7 +3081,10 @@ class LogMetric:
     def from_dict(cls, d: Dict[str, Any]) -> LogMetric:
         """Deserializes the LogMetric from a dictionary."""
         return cls(
+            dataset_digest=d.get("dataset_digest", None),
+            dataset_name=d.get("dataset_name", None),
             key=d.get("key", None),
+            model_id=d.get("model_id", None),
             run_id=d.get("run_id", None),
             run_uuid=d.get("run_uuid", None),
             step=d.get("step", None),
@@ -3036,23 +3235,46 @@ class LogParamResponse:
 class Metric:
     """Metric associated with a run, represented as a key-value pair."""

+    dataset_digest: Optional[str] = None
+    """The dataset digest of the dataset associated with the metric, e.g. an md5 hash of the dataset
+    that uniquely identifies it within datasets of the same name."""
+
+    dataset_name: Optional[str] = None
+    """The name of the dataset associated with the metric. E.g. “my.uc.table@2”
+    “nyc-taxi-dataset”, “fantastic-elk-3”"""
+
     key: Optional[str] = None
-    """
+    """The key identifying the metric."""
+
+    model_id: Optional[str] = None
+    """The ID of the logged model or registered model version associated with the metric, if
+    applicable."""
+
+    run_id: Optional[str] = None
+    """The ID of the run containing the metric."""

     step: Optional[int] = None
-    """
+    """The step at which the metric was logged."""

     timestamp: Optional[int] = None
-    """The timestamp at which
+    """The timestamp at which the metric was recorded."""

     value: Optional[float] = None
-    """
+    """The value of the metric."""

     def as_dict(self) -> dict:
         """Serializes the Metric into a dictionary suitable for use as a JSON request body."""
         body = {}
+        if self.dataset_digest is not None:
+            body["dataset_digest"] = self.dataset_digest
+        if self.dataset_name is not None:
+            body["dataset_name"] = self.dataset_name
         if self.key is not None:
             body["key"] = self.key
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
+        if self.run_id is not None:
+            body["run_id"] = self.run_id
         if self.step is not None:
             body["step"] = self.step
         if self.timestamp is not None:
@@ -3064,8 +3286,16 @@ class Metric:
     def as_shallow_dict(self) -> dict:
         """Serializes the Metric into a shallow dictionary of its immediate attributes."""
         body = {}
+        if self.dataset_digest is not None:
+            body["dataset_digest"] = self.dataset_digest
+        if self.dataset_name is not None:
+            body["dataset_name"] = self.dataset_name
         if self.key is not None:
             body["key"] = self.key
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
+        if self.run_id is not None:
+            body["run_id"] = self.run_id
         if self.step is not None:
             body["step"] = self.step
         if self.timestamp is not None:
@@ -3078,7 +3308,11 @@ class Metric:
     def from_dict(cls, d: Dict[str, Any]) -> Metric:
         """Deserializes the Metric from a dictionary."""
         return cls(
+            dataset_digest=d.get("dataset_digest", None),
+            dataset_name=d.get("dataset_name", None),
             key=d.get("key", None),
+            model_id=d.get("model_id", None),
+            run_id=d.get("run_id", None),
             step=d.get("step", None),
             timestamp=d.get("timestamp", None),
             value=d.get("value", None),
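Editor's sketch (not part of the diff): deserializing a Metric that carries the new dataset and model context. All values are illustrative placeholders.

from databricks.sdk.service.ml import Metric

m = Metric.from_dict(
    {
        "key": "rmse",
        "value": 0.93,
        "timestamp": 1714000000000,
        "step": 0,
        "run_id": "1a2b3c4d",                # new field in 0.50.0
        "model_id": "m-0123456789",          # new field in 0.50.0
        "dataset_name": "nyc-taxi-dataset",  # new field in 0.50.0
        "dataset_digest": "2f03f1f0",        # new field in 0.50.0
    }
)
assert m.model_id == "m-0123456789"
assert m.dataset_name == "nyc-taxi-dataset"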
@@ -3253,6 +3487,33 @@ class ModelDatabricks:
         )


+@dataclass
+class ModelInput:
+    """Represents a LoggedModel or Registered Model Version input to a Run."""
+
+    model_id: str
+    """The unique identifier of the model."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ModelInput into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the ModelInput into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.model_id is not None:
+            body["model_id"] = self.model_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> ModelInput:
+        """Deserializes the ModelInput from a dictionary."""
+        return cls(model_id=d.get("model_id", None))
+
+
 @dataclass
 class ModelTag:
     key: Optional[str] = None
@@ -4571,11 +4832,19 @@ class RunInputs:
     dataset_inputs: Optional[List[DatasetInput]] = None
     """Run metrics."""

+    model_inputs: Optional[List[ModelInput]] = None
+    """**NOTE**: Experimental: This API field may change or be removed in a future release without
+    warning.
+
+    Model inputs to the Run."""
+
     def as_dict(self) -> dict:
         """Serializes the RunInputs into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.dataset_inputs:
             body["dataset_inputs"] = [v.as_dict() for v in self.dataset_inputs]
+        if self.model_inputs:
+            body["model_inputs"] = [v.as_dict() for v in self.model_inputs]
         return body

     def as_shallow_dict(self) -> dict:
@@ -4583,12 +4852,17 @@ class RunInputs:
         body = {}
         if self.dataset_inputs:
             body["dataset_inputs"] = self.dataset_inputs
+        if self.model_inputs:
+            body["model_inputs"] = self.model_inputs
         return body

     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> RunInputs:
         """Deserializes the RunInputs from a dictionary."""
-        return cls(
+        return cls(
+            dataset_inputs=_repeated_dict(d, "dataset_inputs", DatasetInput),
+            model_inputs=_repeated_dict(d, "model_inputs", ModelInput),
+        )


 @dataclass
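Editor's sketch (not part of the diff): RunInputs.from_dict now also reads model_inputs alongside dataset_inputs; the payload below is a placeholder.

from databricks.sdk.service.ml import RunInputs

inputs = RunInputs.from_dict({"model_inputs": [{"model_id": "m-0123456789"}]})
assert inputs.model_inputs[0].model_id == "m-0123456789"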
@@ -6101,6 +6375,38 @@ class ExperimentsAPI:
         res = self._api.do("GET", "/api/2.0/mlflow/experiments/get-by-name", query=query, headers=headers)
         return GetExperimentByNameResponse.from_dict(res)

+    def get_credentials_for_trace_data_download(self, request_id: str) -> GetCredentialsForTraceDataDownloadResponse:
+        """Get credentials to download trace data.
+
+        :param request_id: str
+          The ID of the trace to fetch artifact download credentials for.
+
+        :returns: :class:`GetCredentialsForTraceDataDownloadResponse`
+        """
+
+        headers = {
+            "Accept": "application/json",
+        }
+
+        res = self._api.do("GET", f"/api/2.0/mlflow/traces/{request_id}/credentials-for-data-download", headers=headers)
+        return GetCredentialsForTraceDataDownloadResponse.from_dict(res)
+
+    def get_credentials_for_trace_data_upload(self, request_id: str) -> GetCredentialsForTraceDataUploadResponse:
+        """Get credentials to upload trace data.
+
+        :param request_id: str
+          The ID of the trace to fetch artifact upload credentials for.
+
+        :returns: :class:`GetCredentialsForTraceDataUploadResponse`
+        """
+
+        headers = {
+            "Accept": "application/json",
+        }
+
+        res = self._api.do("GET", f"/api/2.0/mlflow/traces/{request_id}/credentials-for-data-upload", headers=headers)
+        return GetCredentialsForTraceDataUploadResponse.from_dict(res)
+
     def get_experiment(self, experiment_id: str) -> GetExperimentResponse:
         """Get an experiment.

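Editor's sketch (not part of the diff): an end-to-end use of the new trace-data credential calls. The trace ID is a placeholder, and requests is an assumed third-party HTTP client; any client that can send custom headers would work.

import requests  # assumption: installed separately

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

resp = w.experiments.get_credentials_for_trace_data_download("tr-1234")  # placeholder ID
cred = resp.credential_info

# Apply whatever headers the credential requires (e.g. Azure blob headers).
headers = {h.name: h.value for h in (cred.headers or [])}
data = requests.get(cred.signed_uri, headers=headers, timeout=60).content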
@@ -6257,10 +6563,11 @@ class ExperimentsAPI:
          API](/api/workspace/files/listdirectorycontents).

        :param page_token: str (optional)
-
-          artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes.
-          `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which
-          pagination. See [List directory contents | Files
+          The token indicating the page of artifact results to fetch. `page_token` is not supported when
+          listing artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes.
+          Please call `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which
+          supports pagination. See [List directory contents | Files
+          API](/api/workspace/files/listdirectorycontents).
        :param path: str (optional)
          Filter artifacts matching this path (a relative path from the root artifact directory).
        :param run_id: str (optional)
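Editor's note (a sketch, not part of the diff): in this SDK the generated list_artifacts wrapper follows next_page_token itself and yields FileInfo objects, so callers normally never pass page_token by hand. The run ID below is a placeholder.

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

for f in w.experiments.list_artifacts(run_id="1a2b3c4d"):
    print(f.path, f.file_size, f.is_dir)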
@@ -6418,7 +6725,9 @@ class ExperimentsAPI:

         self._api.do("POST", "/api/2.0/mlflow/runs/log-batch", body=body, headers=headers)

-    def log_inputs(
+    def log_inputs(
+        self, run_id: str, *, datasets: Optional[List[DatasetInput]] = None, models: Optional[List[ModelInput]] = None
+    ):
         """Log inputs to a run.

         **NOTE:** Experimental: This API may change or be removed in a future release without warning.
@@ -6429,12 +6738,16 @@ class ExperimentsAPI:
           ID of the run to log under
         :param datasets: List[:class:`DatasetInput`] (optional)
           Dataset inputs
+        :param models: List[:class:`ModelInput`] (optional)
+          Model inputs


         """
         body = {}
         if datasets is not None:
             body["datasets"] = [v.as_dict() for v in datasets]
+        if models is not None:
+            body["models"] = [v.as_dict() for v in models]
         if run_id is not None:
             body["run_id"] = run_id
         headers = {
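Editor's sketch (not part of the diff): calling the widened log_inputs signature with the new models argument. The IDs are placeholders, and the API is marked experimental above.

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.ml import ModelInput

w = WorkspaceClient()

w.experiments.log_inputs(
    run_id="1a2b3c4d",
    models=[ModelInput(model_id="m-0123456789")],
)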
@@ -6450,6 +6763,9 @@ class ExperimentsAPI:
         value: float,
         timestamp: int,
         *,
+        dataset_digest: Optional[str] = None,
+        dataset_name: Optional[str] = None,
+        model_id: Optional[str] = None,
         run_id: Optional[str] = None,
         run_uuid: Optional[str] = None,
         step: Optional[int] = None,
@@ -6466,6 +6782,14 @@ class ExperimentsAPI:
           Double value of the metric being logged.
         :param timestamp: int
           Unix timestamp in milliseconds at the time metric was logged.
+        :param dataset_digest: str (optional)
+          Dataset digest of the dataset associated with the metric, e.g. an md5 hash of the dataset that
+          uniquely identifies it within datasets of the same name.
+        :param dataset_name: str (optional)
+          The name of the dataset associated with the metric. E.g. “my.uc.table@2” “nyc-taxi-dataset”,
+          “fantastic-elk-3”
+        :param model_id: str (optional)
+          ID of the logged model associated with the metric, if applicable
         :param run_id: str (optional)
           ID of the run under which to log the metric. Must be provided.
         :param run_uuid: str (optional)
@@ -6477,8 +6801,14 @@ class ExperimentsAPI:

         """
         body = {}
+        if dataset_digest is not None:
+            body["dataset_digest"] = dataset_digest
+        if dataset_name is not None:
+            body["dataset_name"] = dataset_name
         if key is not None:
             body["key"] = key
+        if model_id is not None:
+            body["model_id"] = model_id
         if run_id is not None:
             body["run_id"] = run_id
         if run_uuid is not None:
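Editor's sketch (not part of the diff): log_metric with the three new optional parameters; all values are placeholders.

import time

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

w.experiments.log_metric(
    key="rmse",
    value=0.93,
    timestamp=int(time.time() * 1000),
    run_id="1a2b3c4d",
    model_id="m-0123456789",          # new in 0.50.0
    dataset_name="nyc-taxi-dataset",  # new in 0.50.0
    dataset_digest="2f03f1f0",        # new in 0.50.0
)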
@@ -6993,6 +7323,7 @@ class ForecastingAPI:
         custom_weights_column: Optional[str] = None,
         experiment_path: Optional[str] = None,
         holiday_regions: Optional[List[str]] = None,
+        include_features: Optional[List[str]] = None,
         max_runtime: Optional[int] = None,
         prediction_data_path: Optional[str] = None,
         primary_metric: Optional[str] = None,
@@ -7006,49 +7337,50 @@ class ForecastingAPI:
         Creates a serverless forecasting experiment. Returns the experiment ID.

         :param train_data_path: str
-          The
-          data for the forecasting model.
+          The fully qualified name of a Unity Catalog table, formatted as catalog_name.schema_name.table_name,
+          used as training data for the forecasting model.
         :param target_column: str
-
-          this column
+          The column in the input training table used as the prediction target for model training. The values
+          in this column are used as the ground truth for model training.
         :param time_column: str
-
+          The column in the input training table that represents each row's timestamp.
         :param forecast_granularity: str
-          The
-
-
+          The time interval between consecutive rows in the time series data. Possible values include: '1
+          second', '1 minute', '5 minutes', '10 minutes', '15 minutes', '30 minutes', 'Hourly', 'Daily',
+          'Weekly', 'Monthly', 'Quarterly', 'Yearly'.
         :param forecast_horizon: int
-          The number of time steps into the future
-
+          The number of time steps into the future to make predictions, calculated as a multiple of
+          forecast_granularity. This value represents how far ahead the model should forecast.
         :param custom_weights_column: str (optional)
-
-          calculate weighted metrics.
+          The column in the training table used to customize weights for each time series.
         :param experiment_path: str (optional)
-          The path
-          workspace.
+          The path in the workspace to store the created experiment.
         :param holiday_regions: List[str] (optional)
-
-
+          The region code(s) to automatically add holiday features. Currently supports only one region.
+        :param include_features: List[str] (optional)
+          Specifies the list of feature columns to include in model training. These columns must exist in the
+          training data and be of type string, numerical, or boolean. If not specified, no additional features
+          will be included. Note: Certain columns are automatically handled: - Automatically excluded:
+          split_column, target_column, custom_weights_column. - Automatically included: time_column.
         :param max_runtime: int (optional)
-          The maximum duration
-
+          The maximum duration for the experiment in minutes. The experiment stops automatically if it exceeds
+          this limit.
         :param prediction_data_path: str (optional)
-          The
-          predictions.
+          The fully qualified path of a Unity Catalog table, formatted as catalog_name.schema_name.table_name,
+          used to store predictions.
         :param primary_metric: str (optional)
           The evaluation metric used to optimize the forecasting model.
         :param register_to: str (optional)
-          The
-          best model.
+          The fully qualified path of a Unity Catalog model, formatted as catalog_name.schema_name.model_name,
+          used to store the best model.
         :param split_column: str (optional)
-
-
+          // The column in the training table used for custom data splits. Values must be 'train', 'validate',
+          or 'test'.
         :param timeseries_identifier_columns: List[str] (optional)
-
-          series
+          The column in the training table used to group the dataset for predicting individual time series.
         :param training_frameworks: List[str] (optional)
-
-
+          List of frameworks to include for model tuning. Possible values are 'Prophet', 'ARIMA', 'DeepAR'. An
+          empty list includes all supported frameworks.

         :returns:
           Long-running operation waiter for :class:`ForecastingExperiment`.
@@ -7065,6 +7397,8 @@ class ForecastingAPI:
             body["forecast_horizon"] = forecast_horizon
         if holiday_regions is not None:
             body["holiday_regions"] = [v for v in holiday_regions]
+        if include_features is not None:
+            body["include_features"] = [v for v in include_features]
         if max_runtime is not None:
             body["max_runtime"] = max_runtime
         if prediction_data_path is not None:
@@ -7108,6 +7442,7 @@ class ForecastingAPI:
         custom_weights_column: Optional[str] = None,
         experiment_path: Optional[str] = None,
         holiday_regions: Optional[List[str]] = None,
+        include_features: Optional[List[str]] = None,
         max_runtime: Optional[int] = None,
         prediction_data_path: Optional[str] = None,
         primary_metric: Optional[str] = None,
@@ -7123,6 +7458,7 @@ class ForecastingAPI:
             forecast_granularity=forecast_granularity,
             forecast_horizon=forecast_horizon,
             holiday_regions=holiday_regions,
+            include_features=include_features,
             max_runtime=max_runtime,
             prediction_data_path=prediction_data_path,
             primary_metric=primary_metric,
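Editor's sketch (not part of the diff): the forecasting call with the new include_features parameter. Table and column names are hypothetical, and this assumes the service is exposed as w.forecasting on WorkspaceClient; per the docstring above, create_experiment returns a long-running-operation waiter.

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

waiter = w.forecasting.create_experiment(
    train_data_path="main.forecasting.sales_history",  # hypothetical UC table
    target_column="units_sold",
    time_column="sale_date",
    forecast_granularity="Daily",
    forecast_horizon=30,
    include_features=["store_id", "is_promo"],  # new in 0.50.0
)
experiment = waiter.result()  # blocks until the experiment reaches a terminal state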