mlrun 1.7.0rc22__py3-none-any.whl → 1.7.0rc28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (81)
  1. mlrun/__main__.py +10 -8
  2. mlrun/alerts/alert.py +13 -1
  3. mlrun/artifacts/manager.py +5 -0
  4. mlrun/common/constants.py +2 -2
  5. mlrun/common/formatters/__init__.py +1 -0
  6. mlrun/common/formatters/artifact.py +26 -3
  7. mlrun/common/formatters/base.py +9 -9
  8. mlrun/common/formatters/run.py +26 -0
  9. mlrun/common/helpers.py +11 -0
  10. mlrun/common/schemas/__init__.py +4 -0
  11. mlrun/common/schemas/alert.py +5 -9
  12. mlrun/common/schemas/api_gateway.py +64 -16
  13. mlrun/common/schemas/artifact.py +11 -0
  14. mlrun/common/schemas/constants.py +3 -0
  15. mlrun/common/schemas/feature_store.py +58 -28
  16. mlrun/common/schemas/model_monitoring/constants.py +21 -12
  17. mlrun/common/schemas/model_monitoring/model_endpoints.py +0 -12
  18. mlrun/common/schemas/pipeline.py +16 -0
  19. mlrun/common/schemas/project.py +17 -0
  20. mlrun/common/schemas/runs.py +17 -0
  21. mlrun/common/schemas/schedule.py +1 -1
  22. mlrun/common/types.py +5 -0
  23. mlrun/config.py +10 -25
  24. mlrun/datastore/azure_blob.py +2 -1
  25. mlrun/datastore/datastore.py +3 -3
  26. mlrun/datastore/google_cloud_storage.py +6 -2
  27. mlrun/datastore/snowflake_utils.py +3 -1
  28. mlrun/datastore/sources.py +26 -11
  29. mlrun/datastore/store_resources.py +2 -0
  30. mlrun/datastore/targets.py +68 -16
  31. mlrun/db/base.py +64 -2
  32. mlrun/db/httpdb.py +129 -41
  33. mlrun/db/nopdb.py +44 -3
  34. mlrun/errors.py +5 -3
  35. mlrun/execution.py +18 -10
  36. mlrun/feature_store/retrieval/spark_merger.py +2 -1
  37. mlrun/frameworks/__init__.py +0 -6
  38. mlrun/model.py +23 -0
  39. mlrun/model_monitoring/api.py +6 -52
  40. mlrun/model_monitoring/applications/histogram_data_drift.py +1 -1
  41. mlrun/model_monitoring/db/stores/__init__.py +37 -24
  42. mlrun/model_monitoring/db/stores/base/store.py +40 -1
  43. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +42 -87
  44. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +27 -35
  45. mlrun/model_monitoring/db/tsdb/__init__.py +15 -15
  46. mlrun/model_monitoring/db/tsdb/base.py +1 -1
  47. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +6 -4
  48. mlrun/model_monitoring/helpers.py +17 -9
  49. mlrun/model_monitoring/stream_processing.py +9 -11
  50. mlrun/model_monitoring/writer.py +11 -11
  51. mlrun/package/__init__.py +1 -13
  52. mlrun/package/packagers/__init__.py +1 -6
  53. mlrun/projects/pipelines.py +10 -9
  54. mlrun/projects/project.py +95 -81
  55. mlrun/render.py +10 -5
  56. mlrun/run.py +13 -8
  57. mlrun/runtimes/base.py +11 -4
  58. mlrun/runtimes/daskjob.py +7 -1
  59. mlrun/runtimes/local.py +16 -3
  60. mlrun/runtimes/nuclio/application/application.py +0 -2
  61. mlrun/runtimes/nuclio/function.py +20 -0
  62. mlrun/runtimes/nuclio/serving.py +9 -6
  63. mlrun/runtimes/pod.py +5 -29
  64. mlrun/serving/routers.py +75 -59
  65. mlrun/serving/server.py +11 -0
  66. mlrun/serving/states.py +29 -0
  67. mlrun/serving/v2_serving.py +62 -39
  68. mlrun/utils/helpers.py +39 -1
  69. mlrun/utils/logger.py +36 -2
  70. mlrun/utils/notifications/notification/base.py +43 -7
  71. mlrun/utils/notifications/notification/git.py +21 -0
  72. mlrun/utils/notifications/notification/slack.py +9 -14
  73. mlrun/utils/notifications/notification/webhook.py +41 -1
  74. mlrun/utils/notifications/notification_pusher.py +3 -9
  75. mlrun/utils/version/version.json +2 -2
  76. {mlrun-1.7.0rc22.dist-info → mlrun-1.7.0rc28.dist-info}/METADATA +12 -7
  77. {mlrun-1.7.0rc22.dist-info → mlrun-1.7.0rc28.dist-info}/RECORD +81 -80
  78. {mlrun-1.7.0rc22.dist-info → mlrun-1.7.0rc28.dist-info}/WHEEL +1 -1
  79. {mlrun-1.7.0rc22.dist-info → mlrun-1.7.0rc28.dist-info}/LICENSE +0 -0
  80. {mlrun-1.7.0rc22.dist-info → mlrun-1.7.0rc28.dist-info}/entry_points.txt +0 -0
  81. {mlrun-1.7.0rc22.dist-info → mlrun-1.7.0rc28.dist-info}/top_level.txt +0 -0
mlrun/__main__.py CHANGED
@@ -50,12 +50,12 @@ from .run import (
 from .runtimes import RemoteRuntime, RunError, RuntimeKinds, ServingRuntime
 from .secrets import SecretsStore
 from .utils import (
+    RunKeys,
     dict_to_yaml,
     get_in,
     is_relative_path,
     list2dict,
     logger,
-    run_keys,
     update_in,
 )
 from .utils.version import Version
@@ -102,7 +102,9 @@ def main():
 )
 @click.option("--uid", help="unique run ID")
 @click.option("--name", help="run name")
-@click.option("--workflow", help="workflow name/id")
+@click.option(
+    "--workflow", help="sets the run labels to match the given workflow name/id"
+)
 @click.option("--project", help="project name/id")
 @click.option("--db", default="", help="save run results to path or DB url")
 @click.option(
@@ -378,15 +380,15 @@ def run(
     set_item(runobj.spec.hyper_param_options, hyper_param_strategy, "strategy")
     set_item(runobj.spec.hyper_param_options, selector, "selector")
 
-    set_item(runobj.spec, inputs, run_keys.inputs, list2dict(inputs))
+    set_item(runobj.spec, inputs, RunKeys.inputs, list2dict(inputs))
     set_item(
-        runobj.spec, returns, run_keys.returns, [py_eval(value) for value in returns]
+        runobj.spec, returns, RunKeys.returns, [py_eval(value) for value in returns]
     )
-    set_item(runobj.spec, in_path, run_keys.input_path)
-    set_item(runobj.spec, out_path, run_keys.output_path)
-    set_item(runobj.spec, outputs, run_keys.outputs, list(outputs))
+    set_item(runobj.spec, in_path, RunKeys.input_path)
+    set_item(runobj.spec, out_path, RunKeys.output_path)
+    set_item(runobj.spec, outputs, RunKeys.outputs, list(outputs))
     set_item(
-        runobj.spec, secrets, run_keys.secrets, line2keylist(secrets, "kind", "source")
+        runobj.spec, secrets, RunKeys.secrets, line2keylist(secrets, "kind", "source")
     )
     set_item(runobj.spec, verbose, "verbose")
     set_item(runobj.spec, scrape_metrics, "scrape_metrics")
mlrun/alerts/alert.py CHANGED
@@ -26,7 +26,6 @@ class AlertConfig(ModelObj):
         "description",
         "summary",
         "severity",
-        "criteria",
         "reset_policy",
         "state",
     ]
@@ -34,6 +33,7 @@ class AlertConfig(ModelObj):
         "entities",
         "notifications",
         "trigger",
+        "criteria",
     ]
 
     def __init__(
@@ -104,6 +104,14 @@ class AlertConfig(ModelObj):
                 else self.trigger
             )
             return None
+        if field_name == "criteria":
+            if self.criteria:
+                return (
+                    self.criteria.dict()
+                    if not isinstance(self.criteria, dict)
+                    else self.criteria
+                )
+            return None
         return super()._serialize_field(struct, field_name, strip)
 
     def to_dict(self, fields: list = None, exclude: list = None, strip: bool = False):
@@ -137,6 +145,10 @@ class AlertConfig(ModelObj):
         trigger_obj = alert_objects.AlertTrigger.parse_obj(trigger_data)
         new_obj.trigger = trigger_obj
 
+        criteria_data = struct.get("criteria")
+        if criteria_data:
+            criteria_obj = alert_objects.AlertCriteria.parse_obj(criteria_data)
+            new_obj.criteria = criteria_obj
         return new_obj
 
     def with_notifications(self, notifications: list[alert_objects.AlertNotification]):
mlrun/artifacts/manager.py CHANGED
@@ -100,6 +100,11 @@ class ArtifactProducer:
 
 def dict_to_artifact(struct: dict) -> Artifact:
     kind = struct.get("kind", "")
+
+    # TODO: remove this in 1.8.0
+    if mlrun.utils.is_legacy_artifact(struct):
+        return mlrun.artifacts.base.convert_legacy_artifact_to_new_format(struct)
+
     artifact_class = artifact_types[kind]
     return artifact_class.from_dict(struct)
 
mlrun/common/constants.py CHANGED
@@ -64,12 +64,12 @@ class MLRunInternalLabels:
     username = f"{MLRUN_LABEL_PREFIX}username"
     username_domain = f"{MLRUN_LABEL_PREFIX}username_domain"
     task_name = f"{MLRUN_LABEL_PREFIX}task-name"
+    resource_name = f"{MLRUN_LABEL_PREFIX}resource_name"
+    created = f"{MLRUN_LABEL_PREFIX}created"
     host = "host"
     job_type = "job-type"
     kind = "kind"
     component = "component"
-    resource_name = "resource_name"
-    created = "mlrun-created"
 
     owner = "owner"
     v3io_user = "v3io_user"
mlrun/common/formatters/__init__.py CHANGED
@@ -17,3 +17,4 @@ from .artifact import ArtifactFormat  # noqa
 from .function import FunctionFormat  # noqa
 from .pipeline import PipelineFormat  # noqa
 from .project import ProjectFormat  # noqa
+from .run import RunFormat  # noqa
mlrun/common/formatters/artifact.py CHANGED
@@ -13,9 +13,32 @@
 # limitations under the License.
 #
 
+import typing
+
 import mlrun.common.types
 
+from .base import ObjectFormat
+
+
+class ArtifactFormat(ObjectFormat, mlrun.common.types.StrEnum):
+    minimal = "minimal"
 
-# TODO: add a format that returns a minimal response with ObjectFormat
-class ArtifactFormat(mlrun.common.types.StrEnum):
-    full = "full"
+    @staticmethod
+    def format_method(_format: str) -> typing.Optional[typing.Callable]:
+        return {
+            ArtifactFormat.full: None,
+            ArtifactFormat.minimal: ArtifactFormat.filter_obj_method(
+                [
+                    "kind",
+                    "metadata",
+                    "status",
+                    "project",
+                    "spec.producer",
+                    "spec.db_key",
+                    "spec.size",
+                    "spec.framework",
+                    "spec.metrics",
+                    "spec.target_path",
+                ]
+            ),
+        }[_format]
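Note: `filter_obj_method` is not shown in this diff, so the sketch below assumes it returns a callable that keeps only the fields listed in `format_method` above; the artifact dict is illustrative, not a real mlrun artifact.

from mlrun.common.formatters import ArtifactFormat

# Illustrative artifact dict; the assumption is that the "minimal" format trims it
# down to the whitelisted fields (kind, metadata, status, spec.producer, spec.db_key, ...).
artifact = {
    "kind": "model",
    "metadata": {"key": "my-model", "project": "my-project"},
    "spec": {"db_key": "my-model", "size": 1024, "notes": "expected to be dropped"},
    "status": {"state": "created"},
}
minimal = ArtifactFormat.format_obj(artifact, ArtifactFormat.minimal)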
mlrun/common/formatters/base.py CHANGED
@@ -28,42 +28,42 @@ class ObjectFormat:
     full = "full"
 
     @staticmethod
-    def format_method(_format: str) -> typing.Optional[typing.Callable]:
+    def format_method(format_: str) -> typing.Optional[typing.Callable]:
         """
         Get the formatting method for the provided format.
         A `None` value signifies a pass-through formatting method (no formatting).
-        :param _format: The format as a string representation.
+        :param format_: The format as a string representation.
         :return: The formatting method.
         """
         return {
             ObjectFormat.full: None,
-        }[_format]
+        }[format_]
 
     @classmethod
     def format_obj(
         cls,
         obj: typing.Any,
-        _format: str,
+        format_: str,
         exclude_formats: typing.Optional[list[str]] = None,
     ) -> typing.Any:
         """
         Format the provided object based on the provided format.
         :param obj: The object to format.
-        :param _format: The format as a string representation.
+        :param format_: The format as a string representation.
         :param exclude_formats: A list of formats to exclude from the formatting process. If the provided format is in
                                 this list, an invalid format exception will be raised.
         """
         exclude_formats = exclude_formats or []
-        _format = _format or cls.full
+        format_ = format_ or cls.full
         invalid_format_exc = mlrun.errors.MLRunBadRequestError(
-            f"Provided format is not supported. format={_format}"
+            f"Provided format is not supported. format={format_}"
         )
 
-        if _format in exclude_formats:
+        if format_ in exclude_formats:
             raise invalid_format_exc
 
         try:
-            format_method = cls.format_method(_format)
+            format_method = cls.format_method(format_)
         except KeyError:
             raise invalid_format_exc
 
mlrun/common/formatters/run.py ADDED
@@ -0,0 +1,26 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import mlrun.common.types
+from mlrun.common.formatters.base import ObjectFormat
+
+
+class RunFormat(ObjectFormat, mlrun.common.types.StrEnum):
+    # No enrichment, data is pulled as-is from the database.
+    standard = "standard"
+
+    # Performs run enrichment, including the run's artifacts. Only available for the `get` run API.
+    full = "full"
mlrun/common/helpers.py CHANGED
@@ -34,3 +34,14 @@ def parse_versioned_object_uri(
         uri = uri[:loc]
 
     return project, uri, tag, hash_key
+
+
+def generate_api_gateway_name(project: str, name: str) -> str:
+    """
+    Generate a unique (within project) api gateway name
+    :param project: project name
+    :param name:    api gateway name
+
+    :return: the resolved api gateway name
+    """
+    return f"{project}-{name}" if project else name
mlrun/common/schemas/__init__.py CHANGED
@@ -82,6 +82,7 @@ from .events import (
 )
 from .feature_store import (
     EntitiesOutput,
+    EntitiesOutputV2,
     Entity,
     EntityListOutput,
     EntityRecord,
@@ -90,7 +91,9 @@ from .feature_store import (
     FeatureRecord,
     FeatureSet,
     FeatureSetDigestOutput,
+    FeatureSetDigestOutputV2,
     FeatureSetDigestSpec,
+    FeatureSetDigestSpecV2,
     FeatureSetIngestInput,
     FeatureSetIngestOutput,
     FeatureSetRecord,
@@ -98,6 +101,7 @@ from .feature_store import (
     FeatureSetSpec,
     FeatureSetsTagsOutput,
     FeaturesOutput,
+    FeaturesOutputV2,
     FeatureVector,
     FeatureVectorRecord,
     FeatureVectorsOutput,
mlrun/common/schemas/alert.py CHANGED
@@ -39,8 +39,8 @@ class EventKind(StrEnum):
     CONCEPT_DRIFT_SUSPECTED = "concept_drift_suspected"
     MODEL_PERFORMANCE_DETECTED = "model_performance_detected"
     MODEL_PERFORMANCE_SUSPECTED = "model_performance_suspected"
-    MODEL_SERVING_PERFORMANCE_DETECTED = "model_serving_performance_detected"
-    MODEL_SERVING_PERFORMANCE_SUSPECTED = "model_serving_performance_suspected"
+    SYSTEM_PERFORMANCE_DETECTED = "system_performance_detected"
+    SYSTEM_PERFORMANCE_SUSPECTED = "system_performance_suspected"
     MM_APP_ANOMALY_DETECTED = "mm_app_anomaly_detected"
     MM_APP_ANOMALY_SUSPECTED = "mm_app_anomaly_suspected"
     FAILED = "failed"
@@ -53,12 +53,8 @@ _event_kind_entity_map = {
     EventKind.CONCEPT_DRIFT_SUSPECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.MODEL_PERFORMANCE_DETECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.MODEL_PERFORMANCE_SUSPECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
-    EventKind.MODEL_SERVING_PERFORMANCE_DETECTED: [
-        EventEntityKind.MODEL_ENDPOINT_RESULT
-    ],
-    EventKind.MODEL_SERVING_PERFORMANCE_SUSPECTED: [
-        EventEntityKind.MODEL_ENDPOINT_RESULT
-    ],
+    EventKind.SYSTEM_PERFORMANCE_DETECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
+    EventKind.SYSTEM_PERFORMANCE_SUSPECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.MM_APP_ANOMALY_DETECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.MM_APP_ANOMALY_SUSPECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.FAILED: [EventEntityKind.JOB],
@@ -104,7 +100,7 @@ class AlertCriteria(pydantic.BaseModel):
         pydantic.Field(
             description="Number of events to wait until notification is sent"
         ),
-    ] = 0
+    ] = 1
     period: Annotated[
         str,
         pydantic.Field(
mlrun/common/schemas/api_gateway.py CHANGED
@@ -17,8 +17,10 @@ from typing import Optional
 
 import pydantic
 
+import mlrun.common.constants as mlrun_constants
 import mlrun.common.types
 from mlrun.common.constants import MLRUN_FUNCTIONS_ANNOTATION
+from mlrun.common.helpers import generate_api_gateway_name
 
 
 class APIGatewayAuthenticationMode(mlrun.common.types.StrEnum):
@@ -100,7 +102,58 @@ class APIGateway(_APIGatewayBaseModel):
             if upstream.nucliofunction.get("name")
         ]
 
-    def enrich_mlrun_function_names(self):
+    def get_invoke_url(self):
+        return (
+            self.spec.host + self.spec.path
+            if self.spec.path and self.spec.host
+            else self.spec.host
+        )
+
+    def enrich_mlrun_names(self):
+        self._enrich_api_gateway_mlrun_name()
+        self._enrich_mlrun_function_names()
+        return self
+
+    def replace_nuclio_names_with_mlrun_names(self):
+        self._replace_nuclio_api_gateway_name_with_mlrun_name()
+        self._replace_nuclio_function_names_with_mlrun_names()
+        return self
+
+    def _replace_nuclio_function_names_with_mlrun_names(self):
+        # replace function names from nuclio names to mlrun names
+        # and adds mlrun function URI's to an api gateway annotations
+        # so when we then get api gateway entity from nuclio, we are able to get mlrun function names
+        mlrun_functions = self.metadata.annotations.get(MLRUN_FUNCTIONS_ANNOTATION)
+        if mlrun_functions:
+            mlrun_function_uris = (
+                mlrun_functions.split("&")
+                if "&" in mlrun_functions
+                else [mlrun_functions]
+            )
+            if len(mlrun_function_uris) != len(self.spec.upstreams):
+                raise mlrun.errors.MLRunValueError(
+                    "Error when translating nuclio names to mlrun names in api gateway:"
+                    " number of functions doesn't match the mlrun functions in annotation"
+                )
+            for i in range(len(mlrun_function_uris)):
+                self.spec.upstreams[i].nucliofunction["name"] = mlrun_function_uris[i]
+        return self
+
+    def _replace_nuclio_api_gateway_name_with_mlrun_name(self):
+        # replace api gateway name
+        # in Nuclio, api gateways are named as `<project>-<mlrun-api-gateway-name>`
+        # remove the project prefix from the name if it exists
+        project_name = self.metadata.labels.get(
+            mlrun_constants.MLRunInternalLabels.nuclio_project_name
+        )
+        if project_name and self.spec.name.startswith(f"{project_name}-"):
+            self.spec.name = self.spec.name[len(project_name) + 1 :]
+            self.metadata.name = self.spec.name
+        return self
+
+    def _enrich_mlrun_function_names(self):
+        # enrich mlrun names with nuclio prefixes
+        # and add mlrun function's URIs to Nuclio function annotations
         upstream_with_nuclio_names = []
         mlrun_function_uris = []
         for upstream in self.spec.upstreams:
@@ -126,21 +179,16 @@ class APIGateway(_APIGatewayBaseModel):
         )
         return self
 
-    def replace_nuclio_names_with_mlrun_uri(self):
-        mlrun_functions = self.metadata.annotations.get(MLRUN_FUNCTIONS_ANNOTATION)
-        if mlrun_functions:
-            mlrun_function_uris = (
-                mlrun_functions.split("&")
-                if "&" in mlrun_functions
-                else [mlrun_functions]
-            )
-            if len(mlrun_function_uris) != len(self.spec.upstreams):
-                raise mlrun.errors.MLRunValueError(
-                    "Error when translating nuclio names to mlrun names in api gateway:"
-                    " number of functions doesn't match the mlrun functions in annotation"
-                )
-            for i in range(len(mlrun_function_uris)):
-                self.spec.upstreams[i].nucliofunction["name"] = mlrun_function_uris[i]
+    def _enrich_api_gateway_mlrun_name(self):
+        # replace api gateway name
+        # in Nuclio, api gateways are named as `<project>-<mlrun-api-gateway-name>`
+        # add the project prefix to the name
+        project_name = self.metadata.labels.get(
+            mlrun_constants.MLRunInternalLabels.nuclio_project_name
+        )
+        if project_name:
+            self.spec.name = generate_api_gateway_name(project_name, self.spec.name)
+            self.metadata.name = self.spec.name
         return self
 
 
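An illustrative sketch (plain functions, not the mlrun API) of the naming convention these methods implement: Nuclio stores an API gateway as `<project>-<name>`, enrichment adds the project prefix, and the reverse translation strips it when present.

def to_nuclio_name(project: str, name: str) -> str:
    # mirrors generate_api_gateway_name from mlrun/common/helpers.py
    return f"{project}-{name}" if project else name


def to_mlrun_name(project: str, nuclio_name: str) -> str:
    # mirrors _replace_nuclio_api_gateway_name_with_mlrun_name above
    prefix = f"{project}-"
    if project and nuclio_name.startswith(prefix):
        return nuclio_name[len(prefix) :]
    return nuclio_name


assert to_nuclio_name("my-project", "gw") == "my-project-gw"
assert to_mlrun_name("my-project", "my-project-gw") == "gw"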
mlrun/common/schemas/artifact.py CHANGED
@@ -15,6 +15,7 @@
 import typing
 
 import pydantic
+from deprecated import deprecated
 
 import mlrun.common.types
 
@@ -58,6 +59,16 @@ class ArtifactIdentifier(pydantic.BaseModel):
     # hash: typing.Optional[str]
 
 
+@deprecated(
+    version="1.7.0",
+    reason="mlrun.common.schemas.ArtifactsFormat is deprecated and will be removed in 1.9.0. "
+    "Use mlrun.common.formatters.ArtifactFormat instead.",
+    category=FutureWarning,
+)
+class ArtifactsFormat(mlrun.common.types.StrEnum):
+    full = "full"
+
+
 class ArtifactMetadata(pydantic.BaseModel):
     key: str
     project: str
mlrun/common/schemas/constants.py CHANGED
@@ -120,10 +120,13 @@ class FeatureStorePartitionByField(mlrun.common.types.StrEnum):
 
 class RunPartitionByField(mlrun.common.types.StrEnum):
     name = "name"  # Supported for runs objects
+    project_and_name = "project_and_name"  # Supported for runs objects
 
     def to_partition_by_db_field(self, db_cls):
         if self.value == RunPartitionByField.name:
             return db_cls.name
+        elif self.value == RunPartitionByField.project_and_name:
+            return db_cls.project, db_cls.name
         else:
             raise mlrun.errors.MLRunInvalidArgumentError(
                 f"Unknown group by field: {self.value}"
mlrun/common/schemas/feature_store.py CHANGED
@@ -14,7 +14,7 @@
 #
 from typing import Optional
 
-from pydantic import BaseModel, Extra, Field
+import pydantic
 
 from .auth import AuthorizationResourceTypes, Credentials
 from .object import (
@@ -27,32 +27,42 @@ from .object import (
 )
 
 
-class Feature(BaseModel):
+class FeatureStoreBaseModel(pydantic.BaseModel):
+    """
+    Intermediate base class, in order to override pydantic's configuration, as per
+    https://docs.pydantic.dev/1.10/usage/model_config/#change-behaviour-globally
+    """
+
+    class Config:
+        copy_on_model_validation = "none"
+
+
+class Feature(FeatureStoreBaseModel):
     name: str
     value_type: str
     labels: Optional[dict] = {}
 
     class Config:
-        extra = Extra.allow
+        extra = pydantic.Extra.allow
 
 
-class Entity(BaseModel):
+class Entity(FeatureStoreBaseModel):
     name: str
     value_type: str
     labels: Optional[dict] = {}
 
     class Config:
-        extra = Extra.allow
+        extra = pydantic.Extra.allow
 
 
 class FeatureSetSpec(ObjectSpec):
     entities: list[Entity] = []
     features: list[Feature] = []
-    engine: Optional[str] = Field(default="storey")
+    engine: Optional[str] = pydantic.Field(default="storey")
 
 
-class FeatureSet(BaseModel):
-    kind: ObjectKind = Field(ObjectKind.feature_set, const=True)
+class FeatureSet(FeatureStoreBaseModel):
+    kind: ObjectKind = pydantic.Field(ObjectKind.feature_set, const=True)
     metadata: ObjectMetadata
     spec: FeatureSetSpec
     status: ObjectStatus
@@ -62,7 +72,7 @@ class FeatureSet(BaseModel):
         return AuthorizationResourceTypes.feature_set
 
 
-class EntityRecord(BaseModel):
+class EntityRecord(FeatureStoreBaseModel):
     name: str
     value_type: str
     labels: list[LabelRecord]
@@ -71,7 +81,7 @@ class EntityRecord(BaseModel):
         orm_mode = True
 
 
-class FeatureRecord(BaseModel):
+class FeatureRecord(FeatureStoreBaseModel):
     name: str
     value_type: str
     labels: list[LabelRecord]
@@ -88,44 +98,64 @@ class FeatureSetRecord(ObjectRecord):
         orm_mode = True
 
 
-class FeatureSetsOutput(BaseModel):
+class FeatureSetsOutput(FeatureStoreBaseModel):
     feature_sets: list[FeatureSet]
 
 
-class FeatureSetsTagsOutput(BaseModel):
+class FeatureSetsTagsOutput(FeatureStoreBaseModel):
     tags: list[str] = []
 
 
-class FeatureSetDigestSpec(BaseModel):
+class FeatureSetDigestSpec(FeatureStoreBaseModel):
     entities: list[Entity]
     features: list[Feature]
 
 
-class FeatureSetDigestOutput(BaseModel):
+class FeatureSetDigestOutput(FeatureStoreBaseModel):
     metadata: ObjectMetadata
     spec: FeatureSetDigestSpec
 
 
-class FeatureListOutput(BaseModel):
+class FeatureSetDigestSpecV2(FeatureStoreBaseModel):
+    entities: list[Entity]
+
+
+class FeatureSetDigestOutputV2(FeatureStoreBaseModel):
+    feature_set_index: int
+    metadata: ObjectMetadata
+    spec: FeatureSetDigestSpecV2
+
+
+class FeatureListOutput(FeatureStoreBaseModel):
     feature: Feature
     feature_set_digest: FeatureSetDigestOutput
 
 
-class FeaturesOutput(BaseModel):
+class FeaturesOutput(FeatureStoreBaseModel):
     features: list[FeatureListOutput]
 
 
-class EntityListOutput(BaseModel):
+class FeaturesOutputV2(FeatureStoreBaseModel):
+    features: list[Feature]
+    feature_set_digests: list[FeatureSetDigestOutputV2]
+
+
+class EntityListOutput(FeatureStoreBaseModel):
     entity: Entity
     feature_set_digest: FeatureSetDigestOutput
 
 
-class EntitiesOutput(BaseModel):
+class EntitiesOutputV2(FeatureStoreBaseModel):
+    entities: list[Entity]
+    feature_set_digests: list[FeatureSetDigestOutputV2]
+
+
+class EntitiesOutput(FeatureStoreBaseModel):
     entities: list[EntityListOutput]
 
 
-class FeatureVector(BaseModel):
-    kind: ObjectKind = Field(ObjectKind.feature_vector, const=True)
+class FeatureVector(FeatureStoreBaseModel):
+    kind: ObjectKind = pydantic.Field(ObjectKind.feature_vector, const=True)
     metadata: ObjectMetadata
     spec: ObjectSpec
     status: ObjectStatus
@@ -139,39 +169,39 @@ class FeatureVectorRecord(ObjectRecord):
     pass
 
 
-class FeatureVectorsOutput(BaseModel):
+class FeatureVectorsOutput(FeatureStoreBaseModel):
     feature_vectors: list[FeatureVector]
 
 
-class FeatureVectorsTagsOutput(BaseModel):
+class FeatureVectorsTagsOutput(FeatureStoreBaseModel):
     tags: list[str] = []
 
 
-class DataSource(BaseModel):
+class DataSource(FeatureStoreBaseModel):
     kind: str
     name: str
    path: str
 
     class Config:
-        extra = Extra.allow
+        extra = pydantic.Extra.allow
 
 
-class DataTarget(BaseModel):
+class DataTarget(FeatureStoreBaseModel):
    kind: str
     name: str
     path: Optional[str]
 
     class Config:
-        extra = Extra.allow
+        extra = pydantic.Extra.allow
 
 
-class FeatureSetIngestInput(BaseModel):
+class FeatureSetIngestInput(FeatureStoreBaseModel):
     source: Optional[DataSource]
     targets: Optional[list[DataTarget]]
     infer_options: Optional[int]
     credentials: Credentials = Credentials()
 
 
-class FeatureSetIngestOutput(BaseModel):
+class FeatureSetIngestOutput(FeatureStoreBaseModel):
     feature_set: FeatureSet
     run_object: dict
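For reference, a minimal sketch against pydantic 1.x showing the effect of the `copy_on_model_validation = "none"` setting introduced by `FeatureStoreBaseModel`: nested model instances are reused rather than copied during validation. The `Inner`/`Outer` models are illustrative, not mlrun classes.

import pydantic


class Inner(pydantic.BaseModel):
    value: int

    class Config:
        copy_on_model_validation = "none"  # reuse the instance instead of copying it


class Outer(pydantic.BaseModel):
    inner: Inner


inner = Inner(value=1)
outer = Outer(inner=inner)
assert outer.inner is inner  # same object; pydantic's default behavior would create a copy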