mlrun 1.7.0rc49__py3-none-any.whl → 1.7.0rc51__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mlrun/common/formatters/run.py +3 -0
- mlrun/common/schemas/auth.py +3 -0
- mlrun/common/schemas/workflow.py +9 -2
- mlrun/data_types/data_types.py +1 -1
- mlrun/execution.py +7 -1
- mlrun/feature_store/retrieval/spark_merger.py +0 -4
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +41 -8
- mlrun/projects/operations.py +11 -8
- mlrun/projects/pipelines.py +12 -7
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/METADATA +103 -25
- {mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/RECORD +16 -16
- {mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/WHEEL +0 -0
- {mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/top_level.txt +0 -0
mlrun/common/formatters/run.py
CHANGED
@@ -22,5 +22,8 @@ class RunFormat(ObjectFormat, mlrun.common.types.StrEnum):
     # No enrichment, data is pulled as-is from the database.
     standard = "standard"

+    # Enrich run with full notifications since the notification params are subtracted from the run body.
+    notifications = "notifications"
+
     # Performs run enrichment, including the run's artifacts. Only available for the `get` run API.
     full = "full"
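The new `notifications` format is consumed by the `mlrun/execution.py` change further down, where `MLClientCtx.get_notifications()` re-reads the run from the DB with this format. Below is a minimal, hedged sketch of requesting it directly; the run UID and project name are placeholders, and passing `format_` to `read_run()` is assumed to work as the `mlrun/execution.py` hunk suggests.

```python
# Hedged sketch: fetch a run enriched with its full notifications.
import mlrun
import mlrun.common.formatters

db = mlrun.get_run_db()  # assumes a configured/reachable MLRun API
run = db.read_run(
    "run-uid",              # placeholder run UID
    project="my-project",   # placeholder project name
    format_=mlrun.common.formatters.RunFormat.notifications,
)
notifications = run["spec"]["notifications"]
```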
mlrun/common/schemas/auth.py
CHANGED
@@ -141,6 +141,9 @@ class AuthInfo(pydantic.BaseModel):
         member_ids.extend(self.user_group_ids)
         return member_ids

+    def get_session(self) -> str:
+        return self.data_session or self.session
+

 class Credentials(pydantic.BaseModel):
     access_key: typing.Optional[str]
mlrun/common/schemas/workflow.py
CHANGED
@@ -16,8 +16,9 @@ import typing

 import pydantic

-from .notification import Notification
-from .schedule import ScheduleCronTrigger
+from mlrun.common.schemas.notification import Notification
+from mlrun.common.schemas.schedule import ScheduleCronTrigger
+from mlrun.common.types import StrEnum


 class WorkflowSpec(pydantic.BaseModel):
@@ -55,3 +56,9 @@ class WorkflowResponse(pydantic.BaseModel):

 class GetWorkflowResponse(pydantic.BaseModel):
     workflow_id: str = None
+
+
+class EngineType(StrEnum):
+    LOCAL = "local"
+    REMOTE = "remote"
+    KFP = "kfp"
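Since `EngineType` derives from `StrEnum`, its members compare equal to the plain engine-kind strings that workflow specs already carry, which is what lets the `mlrun/projects/pipelines.py` and `mlrun/projects/operations.py` hunks further down swap string literals for enum members without changing behavior. A small illustrative check (not part of the package):

```python
# Hedged sketch: EngineType members behave like the strings "local"/"remote"/"kfp".
from mlrun.common.schemas.workflow import EngineType

engine_kind = "kfp"  # e.g. the value stored on a workflow spec
assert engine_kind == EngineType.KFP
assert EngineType.LOCAL == "local" and EngineType.REMOTE == "remote"
```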
mlrun/data_types/data_types.py
CHANGED
mlrun/execution.py
CHANGED
@@ -24,6 +24,7 @@ from dateutil import parser

 import mlrun
 import mlrun.common.constants as mlrun_constants
+import mlrun.common.formatters
 from mlrun.artifacts import ModelArtifact
 from mlrun.datastore.store_resources import get_store_resource
 from mlrun.errors import MLRunInvalidArgumentError
@@ -928,9 +929,14 @@ class MLClientCtx:

     def get_notifications(self):
         """Get the list of notifications"""
+
+        # Get the full notifications from the DB since the run context does not contain the params due to bloating
+        run = self._rundb.read_run(
+            self.uid, format_=mlrun.common.formatters.RunFormat.notifications
+        )
         return [
             mlrun.model.Notification.from_dict(notification)
-            for notification in
+            for notification in run["spec"]["notifications"]
         ]

     def to_dict(self):
mlrun/feature_store/retrieval/spark_merger.py
CHANGED
@@ -206,10 +206,6 @@ class SparkFeatureMerger(BaseMerger):
         time_column=None,
         additional_filters=None,
     ):
-        mlrun.utils.helpers.additional_filters_warning(
-            additional_filters, self.__class__
-        )
-
         source_kwargs = {}
         if feature_set.spec.passthrough:
             if not feature_set.spec.source:
mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py
CHANGED
@@ -57,8 +57,25 @@ class TDEngineConnector(TSDBConnector):
         self._connection = self._create_connection()
         return self._connection

+    def with_retry_on_closed_connection(self, fn, **kwargs):
+        try:
+            return fn(self.connection, **kwargs)
+        except (taosws.QueryError, taosws.FetchError) as err:
+            logger.warn(f"TDEngine error: {err}")
+            if "Internal error:" in str(err):
+                logger.info("Retrying TDEngine query with a new connection")
+                try:
+                    self._connection.close()
+                except Exception:
+                    pass
+                self._connection = None
+                return fn(self.connection, **kwargs)
+            else:
+                raise err
+
     def _create_connection(self) -> taosws.Connection:
         """Establish a connection to the TSDB server."""
+        logger.debug("Creating a new connection to TDEngine", project=self.project)
         conn = taosws.connect(self._tdengine_connection_string)
         try:
             conn.execute(f"CREATE DATABASE {self.database}")
@@ -71,6 +88,7 @@ class TDEngineConnector(TSDBConnector):
             raise mlrun.errors.MLRunTSDBConnectionFailureError(
                 f"Failed to use TDEngine database {self.database}, {mlrun.errors.err_to_str(e)}"
             )
+        logger.debug("Connected to TDEngine", project=self.project)
         return conn

     def _init_super_tables(self):
@@ -91,7 +109,9 @@ class TDEngineConnector(TSDBConnector):
         """Create TDEngine supertables."""
         for table in self.tables:
             create_table_query = self.tables[table]._create_super_table_query()
-            self.
+            self.with_retry_on_closed_connection(
+                lambda conn: conn.execute(create_table_query)
+            )

     def write_application_event(
         self,
@@ -137,10 +157,14 @@ class TDEngineConnector(TSDBConnector):
         )

         create_table_sql = table._create_subtable_sql(subtable=table_name, values=event)
-        self.
+        self.with_retry_on_closed_connection(
+            lambda conn: conn.execute(create_table_sql)
+        )

-        insert_statement =
-
+        insert_statement = self.with_retry_on_closed_connection(
+            lambda conn: table._insert_subtable_stmt(
+                conn, subtable=table_name, values=event
+            )
         )
         insert_statement.add_batch()
         insert_statement.execute()
@@ -200,18 +224,25 @@ class TDEngineConnector(TSDBConnector):
         """
         Delete all project resources in the TSDB connector, such as model endpoints data and drift results.
         """
+        logger.debug(
+            "Deleting all project resources using the TDEngine connector",
+            project=self.project,
+        )
         for table in self.tables:
             get_subtable_names_query = self.tables[table]._get_subtables_query(
                 values={mm_schemas.EventFieldType.PROJECT: self.project}
             )
-            subtables = self.
+            subtables = self.with_retry_on_closed_connection(
+                lambda conn: conn.query(get_subtable_names_query)
+            )
             for subtable in subtables:
                 drop_query = self.tables[table]._drop_subtable_query(
                     subtable=subtable[0]
                 )
                 self.connection.execute(drop_query)
-        logger.
-
+        logger.debug(
+            "Deleted all project resources using the TDEngine connector",
+            project=self.project,
         )

     def get_model_endpoint_real_time_metrics(
@@ -282,7 +313,9 @@ class TDEngineConnector(TSDBConnector):
         )
         logger.debug("Querying TDEngine", query=full_query)
         try:
-            query_result = self.
+            query_result = self.with_retry_on_closed_connection(
+                lambda conn: conn.query(full_query)
+            )
         except taosws.QueryError as e:
             raise mlrun.errors.MLRunInvalidArgumentError(
                 f"Failed to query table {table} in database {self.database}, {str(e)}"
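The `with_retry_on_closed_connection` helper added above wraps each TDEngine call so that a query or fetch error containing "Internal error:" triggers one reconnect-and-retry. Below is a generic, self-contained sketch of the same pattern; the class and parameter names are illustrative and are not mlrun or taosws API.

```python
# Hedged sketch of the retry-on-stale-connection pattern used above:
# run a callable against a cached connection, and rebuild the connection
# once if the error looks like a dead/closed connection.
class RetryingConnector:
    def __init__(self, connect, is_stale_error):
        self._connect = connect              # factory returning a fresh connection
        self._is_stale_error = is_stale_error  # predicate deciding whether to retry
        self._connection = None

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self._connect()
        return self._connection

    def with_retry_on_closed_connection(self, fn, **kwargs):
        try:
            return fn(self.connection, **kwargs)
        except Exception as err:
            if not self._is_stale_error(err):
                raise
            try:
                self._connection.close()     # best effort; may already be closed
            except Exception:
                pass
            self._connection = None          # force a reconnect on next access
            return fn(self.connection, **kwargs)
```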
mlrun/projects/operations.py
CHANGED
@@ -15,10 +15,13 @@
 import warnings
 from typing import Optional, Union

-
+import mlrun_pipelines.common.models
+import mlrun_pipelines.models

 import mlrun
 import mlrun.common.constants as mlrun_constants
+import mlrun.common.schemas.function
+import mlrun.common.schemas.workflow
 from mlrun.utils import hub_prefix

 from .pipelines import enrich_function_object, pipeline_context
@@ -49,7 +52,7 @@ def _get_engine_and_function(function, project=None):
     function = enrich_function_object(project, function, copy_function=False)

     if not pipeline_context.workflow:
-        return
+        return mlrun.common.schemas.workflow.EngineType.LOCAL, function

     return pipeline_context.workflow.engine, function

@@ -78,7 +81,7 @@ def run_function(
     returns: Optional[list[Union[str, dict[str, str]]]] = None,
     builder_env: Optional[list] = None,
     reset_on_run: Optional[bool] = None,
-) -> Union[mlrun.model.RunObject, PipelineNodeWrapper]:
+) -> Union[mlrun.model.RunObject, mlrun_pipelines.models.PipelineNodeWrapper]:
     """Run a local or remote task as part of a local/kubeflow pipeline

     run_function() allow you to execute a function locally, on a remote cluster, or as part of an automated workflow
@@ -186,7 +189,7 @@ def run_function(
     )
     task.spec.verbose = task.spec.verbose or verbose

-    if engine ==
+    if engine == mlrun.common.schemas.workflow.EngineType.KFP:
         if schedule:
             raise mlrun.errors.MLRunInvalidArgumentError(
                 "Scheduling jobs is not supported when running a workflow with the kfp engine."
@@ -266,7 +269,7 @@ def build_function(
     overwrite_build_params: bool = False,
     extra_args: str = None,
     force_build: bool = False,
-) -> Union[BuildStatus, PipelineNodeWrapper]:
+) -> Union[BuildStatus, mlrun_pipelines.models.PipelineNodeWrapper]:
     """deploy ML function, build container with its dependencies

     :param function: Name of the function (in the project) or function object
@@ -302,7 +305,7 @@ def build_function(
         raise mlrun.errors.MLRunInvalidArgumentError(
             "Cannot build use deploy_function()"
         )
-    if engine ==
+    if engine == mlrun.common.schemas.workflow.EngineType.KFP:
         if overwrite_build_params:
             function.spec.build.commands = None
             if requirements or requirements_file:
@@ -375,7 +378,7 @@ def deploy_function(
     builder_env: dict = None,
     project_object=None,
     mock: bool = None,
-) -> Union[DeployStatus, PipelineNodeWrapper]:
+) -> Union[DeployStatus, mlrun_pipelines.models.PipelineNodeWrapper]:
     """deploy real-time (nuclio based) functions

     :param function: name of the function (in the project) or function object
@@ -392,7 +395,7 @@ def deploy_function(
         raise mlrun.errors.MLRunInvalidArgumentError(
             "deploy is used with real-time functions, for other kinds use build_function()"
         )
-    if engine ==
+    if engine == mlrun.common.schemas.workflow.EngineType.KFP:
         return function.deploy_step(models=models, env=env, tag=tag, verbose=verbose)
     else:
         if env:
mlrun/projects/pipelines.py
CHANGED
@@ -27,6 +27,8 @@ import mlrun_pipelines.utils
 import mlrun
 import mlrun.common.runtimes.constants
 import mlrun.common.schemas
+import mlrun.common.schemas.function
+import mlrun.common.schemas.workflow
 import mlrun.utils.notifications
 from mlrun.errors import err_to_str
 from mlrun.utils import (
@@ -44,21 +46,21 @@ from ..runtimes.pod import AutoMountType

 def get_workflow_engine(engine_kind, local=False):
     if pipeline_context.is_run_local(local):
-        if engine_kind ==
+        if engine_kind == mlrun.common.schemas.workflow.EngineType.KFP:
             logger.warning(
                 "Running kubeflow pipeline locally, note some ops may not run locally!"
             )
-        elif engine_kind ==
+        elif engine_kind == mlrun.common.schemas.workflow.EngineType.REMOTE:
             raise mlrun.errors.MLRunInvalidArgumentError(
                 "Cannot run a remote pipeline locally using `kind='remote'` and `local=True`. "
                 "in order to run a local pipeline remotely, please use `engine='remote:local'` instead"
             )
         return _LocalRunner
-    if not engine_kind or engine_kind ==
+    if not engine_kind or engine_kind == mlrun.common.schemas.workflow.EngineType.KFP:
         return _KFPRunner
-    if engine_kind ==
+    if engine_kind == mlrun.common.schemas.workflow.EngineType.LOCAL:
         return _LocalRunner
-    if engine_kind ==
+    if engine_kind == mlrun.common.schemas.workflow.EngineType.REMOTE:
         return _RemoteRunner
     raise mlrun.errors.MLRunInvalidArgumentError(
         f"Provided workflow engine is not supported. engine_kind={engine_kind}"
@@ -313,7 +315,11 @@ def get_db_function(project, key) -> mlrun.runtimes.BaseRuntime:


 def enrich_function_object(
-    project
+    project: mlrun.common.schemas.Project,
+    function: mlrun.runtimes.BaseRuntime,
+    decorator: typing.Callable = None,
+    copy_function: bool = True,
+    try_auto_mount: bool = True,
 ) -> mlrun.runtimes.BaseRuntime:
     if hasattr(function, "_enriched"):
         return function
@@ -354,7 +360,6 @@ def enrich_function_object(
     f.enrich_runtime_spec(
         project.spec.default_function_node_selector,
     )
-
     if try_auto_mount:
         if (
             decorator and AutoMountType.is_auto_modifier(decorator)
mlrun/utils/version/version.json
CHANGED
{mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mlrun
-Version: 1.7.0rc49
+Version: 1.7.0rc51
 Summary: Tracking and config of machine learning runs
 Home-page: https://github.com/mlrun/mlrun
 Author: Yaron Haviv
@@ -50,7 +50,7 @@ Requires-Dist: setuptools ~=71.0
 Requires-Dist: deprecated ~=1.2
 Requires-Dist: jinja2 >=3.1.3,~=3.1
 Requires-Dist: orjson <4,>=3.9.15
-Requires-Dist: mlrun-pipelines-kfp-common ~=0.1.
+Requires-Dist: mlrun-pipelines-kfp-common ~=0.1.8
 Requires-Dist: mlrun-pipelines-kfp-v1-8 ~=0.1.6
 Provides-Extra: alibaba-oss
 Requires-Dist: ossfs ==2023.12.0 ; extra == 'alibaba-oss'
@@ -84,7 +84,7 @@ Requires-Dist: redis ~=4.3 ; extra == 'all'
 Requires-Dist: s3fs <2024.7,>=2023.9.2 ; extra == 'all'
 Requires-Dist: snowflake-connector-python ~=3.7 ; extra == 'all'
 Requires-Dist: sqlalchemy ~=1.4 ; extra == 'all'
-Requires-Dist: taos-ws-py
+Requires-Dist: taos-ws-py ==0.3.2 ; extra == 'all'
 Provides-Extra: api
 Requires-Dist: uvicorn ~=0.27.1 ; extra == 'api'
 Requires-Dist: dask-kubernetes ~=0.11.0 ; extra == 'api'
@@ -137,7 +137,7 @@ Requires-Dist: redis ~=4.3 ; extra == 'complete'
 Requires-Dist: s3fs <2024.7,>=2023.9.2 ; extra == 'complete'
 Requires-Dist: snowflake-connector-python ~=3.7 ; extra == 'complete'
 Requires-Dist: sqlalchemy ~=1.4 ; extra == 'complete'
-Requires-Dist: taos-ws-py
+Requires-Dist: taos-ws-py ==0.3.2 ; extra == 'complete'
 Provides-Extra: complete-api
 Requires-Dist: adlfs ==2023.9.0 ; extra == 'complete-api'
 Requires-Dist: aiobotocore <2.16,>=2.5.0 ; extra == 'complete-api'
@@ -174,7 +174,7 @@ Requires-Dist: redis ~=4.3 ; extra == 'complete-api'
 Requires-Dist: s3fs <2024.7,>=2023.9.2 ; extra == 'complete-api'
 Requires-Dist: snowflake-connector-python ~=3.7 ; extra == 'complete-api'
 Requires-Dist: sqlalchemy ~=1.4 ; extra == 'complete-api'
-Requires-Dist: taos-ws-py
+Requires-Dist: taos-ws-py ==0.3.2 ; extra == 'complete-api'
 Requires-Dist: timelength ~=1.1 ; extra == 'complete-api'
 Requires-Dist: uvicorn ~=0.27.1 ; extra == 'complete-api'
 Requires-Dist: memray ~=1.12 ; (sys_platform != "win32") and extra == 'complete-api'
@@ -209,7 +209,7 @@ Requires-Dist: snowflake-connector-python ~=3.7 ; extra == 'snowflake'
 Provides-Extra: sqlalchemy
 Requires-Dist: sqlalchemy ~=1.4 ; extra == 'sqlalchemy'
 Provides-Extra: tdengine
-Requires-Dist: taos-ws-py
+Requires-Dist: taos-ws-py ==0.3.2 ; extra == 'tdengine'

 <a id="top"></a>
 [](https://github.com/mlrun/mlrun/actions/workflows/build.yaml?query=branch%3Adevelopment)
@@ -225,19 +225,86 @@ Requires-Dist: taos-ws-py ~=0.3.3 ; extra == 'tdengine'

 # Using MLRun

-MLRun is an open
+MLRun is an open source AI orchestration platform for quickly building and managing continuous (gen) AI applications across their lifecycle. MLRun integrates into your development and CI/CD environment and automates the delivery of production data, ML pipelines, and online applications.
+MLRun significantly reduces engineering efforts, time to production, and computation resources.
 With MLRun, you can choose any IDE on your local machine or on the cloud. MLRun breaks the silos between data, ML, software, and DevOps/MLOps teams, enabling collaboration and fast continuous improvements.

-Get started with MLRun [**Tutorials and Examples**](https://docs.mlrun.org/en/
+Get started with the MLRun [**Tutorials and Examples**](https://docs.mlrun.org/en/stable/tutorials/index.html) and the [**Installation and setup guide**](https://docs.mlrun.org/en/stable/install.html), or read about the [**MLRun Architecture**](https://docs.mlrun.org/en/stable/architecture.html).
+
+This page explains how MLRun addresses the [**gen AI tasks**](#genai-tasks), [**MLOps tasks**](#mlops-tasks), and presents the [**MLRun core components**](#core-components).
+
+See the supported data stores, development tools, services, platforms, etc., supported by MLRun's open architecture in **https://docs.mlrun.org/en/stable/ecosystem.html**.
+
+## Gen AI tasks
+
+<p align="center"><img src="https://github.com/mlrun/mlrun/raw/development/docs/_static/images/ai-tasks.png" alt="ai-tasks" width="800"/></p><br>
+
+Use MLRun to develop, scale, deploy, and monitor your AI model across your enterprise. The [**gen AI development workflow**](https://docs.mlrun.org/en/stable/genai/genai-flow.html)
+section describes the different tasks and stages in detail.
+
+### Data management
+
+
+MLRun supports batch or realtime data processing at scale, data lineage and versioning, structured and unstructured data, and more.
+Removing inappropriate data at an early stage saves resources that would otherwise be required later on.
+
+
+**Docs:**
+[Using LLMs to process unstructured data](https://docs.mlrun.org/en/stable/genai/data-mgmt/unstructured-data.html)
+[Vector databases](https://docs.mlrun.org/en/stable/genai/data-mgmt/vector-databases.html)
+[Guardrails for data management](https://docs.mlrun.org/en/stable/genai/data-mgmt/guardrails-data.html)
+**Demo:**
+[Call center demo](https://github.com/mlrun/demo-call-center>`
+**Video:**
+[Call center](https://youtu.be/YycMbxRgLBA>`
+
+### Development
+Use MLRun to build an automated ML pipeline to: collect data,
+preprocess (prepare) the data, run the training pipeline, and evaluate the model.
+
+**Docs:**
+[Working with RAG](https://docs.mlrun.org/en/stable/genai/development/working-with-rag.html), [Evalating LLMs](https://docs.mlrun.org/en/stable/genai/development/evaluating-llms.html), [Fine tuning LLMS](https://docs.mlrun.org/en/stable/genai/development/fine-tuning-llms.html)
+**Demos:**
+[Call center demo](https://github.com/mlrun/demo-call-center), [Build & deploy custom (fine-tuned) LLM models and applications](https://github.com/mlrun/demo-llm-tuning/blob/main), [Interactive bot demo using LLMs](https://github.com/mlrun/demo-llm-bot/blob/main)
+**Video:**
+[Call center](https://youtu.be/YycMbxRgLBA)
+
+
+### Deployment
+MLRun serving can productize the newly trained LLM as a serverless function using real-time auto-scaling Nuclio serverless functions.
+The application pipeline includes all the steps from accepting events or data, contextualizing it with a state preparing the required model features,
+inferring results using one or more models, and driving actions.
+
+
+**Docs:**
+[Serving gen AI models](https://docs.mlrun.org/en/stable/genai/deployment/genai_serving.html), GPU utilization](https://docs.mlrun.org/en/stable/genai/deployment/gpu_utilization.html), [Gen AI realtime serving graph](https://docs.mlrun.org/en/stable/genai/deployment/genai_serving_graph.html)
+**Tutorial:**
+[Deploy LLM using MLRun](https://docs.mlrun.org/en/stable/tutorials/genai_01_basic_tutorial.html)
+**Demos:**
+[Call center demo](https://github.com/mlrun/demo-call-center), [Build & deploy custom(fine-tuned)]LLM models and applications <https://github.com/mlrun/demo-llm-tuning/blob/main), [Interactive bot demo using LLMs]<https://github.com/mlrun/demo-llm-bot/blob/main)
+**Video:**
+[Call center]<https://youtu.be/YycMbxRgLBA)
+
+
+### Live Ops
+Monitor all resources, data, model and application metrics to ensure performance. Then identify risks, control costs, and measure business KPIs.
+Collect production data, metadata, and metrics to tune the model and application further, and to enable governance and explainability.
+
+
+**Docs:**
+[Model monitoring <monitoring](https://docs.mlrun.org/en/stable/concepts/monitoring.html), [Alerts and notifications](https://docs.mlrun.org/en/stable/concepts/alerts-notifications.html)
+**Tutorials:**
+[Deploy LLM using MLRun](https://docs.mlrun.org/en/stable/tutorials/genai_01_basic_tutorial.html), [Model monitoring using LLM](https://docs.mlrun.org/en/stable/tutorials/genai-02-monitoring-llm.html)
+**Demo:**
+[Build & deploy custom (fine-tuned) LLM models and applications](https://github.com/mlrun/demo-llm-tuning/blob/main)

-This page explains how MLRun addresses the [**MLOps Tasks**](#mlops-tasks) and the [**MLRun core components**](#core-components).

 <a id="mlops-tasks"></a>
 ## MLOps tasks

 <p align="center"><img src="https://github.com/mlrun/mlrun/raw/development/docs/_static/images/mlops-task.png" alt="mlrun-tasks" width="800"/></p><br>

-The [**MLOps development workflow**](https://docs.mlrun.org/en/
+The [**MLOps development workflow**](https://docs.mlrun.org/en/stable/mlops-dev-flow.html) section describes the different tasks and stages in detail.
 MLRun can be used to automate and orchestrate all the different tasks or just specific tasks (and integrate them with what you have already deployed).

 ### Project management and CI/CD automation
@@ -246,32 +313,40 @@ In MLRun the assets, metadata, and services (data, functions, jobs, artifacts, m
 Projects can be imported/exported as a whole, mapped to git repositories or IDE projects (in PyCharm, VSCode, etc.), which enables versioning, collaboration, and CI/CD.
 Project access can be restricted to a set of users and roles.

-
+**Docs:** [Projects and Automation](https://docs.mlrun.org/en/stable/projects/project.html), [CI/CD Integration](https://docs.mlrun.org/en/stable/projects/ci-integration.html)
+**Tutorials:** [Quick start](https://docs.mlrun.org/en/stable/tutorials/01-mlrun-basics.html), [Automated ML Pipeline](https://docs.mlrun.org/en/stable/tutorials/04-pipeline.html)
+**Video:** [Quick start](https://youtu.be/xI8KVGLlj7Q).

 ### Ingest and process data

-MLRun provides abstract interfaces to various offline and online [**data sources**](https://docs.mlrun.org/en/
-In addition, the MLRun [**Feature Store**](https://docs.mlrun.org/en/
+MLRun provides abstract interfaces to various offline and online [**data sources**](https://docs.mlrun.org/en/stable/store/datastore.html), supports batch or realtime data processing at scale, data lineage and versioning, structured and unstructured data, and more.
+In addition, the MLRun [**Feature Store**](https://docs.mlrun.org/en/stable/feature-store/feature-store.html) automates the collection, transformation, storage, catalog, serving, and monitoring of data features across the ML lifecycle and enables feature reuse and sharing.

-See: **Docs:** [Ingest and process data](https://docs.mlrun.org/en/
+See: **Docs:** [Ingest and process data](https://docs.mlrun.org/en/stable/data-prep/index.html), [Feature Store](https://docs.mlrun.org/en/stable/feature-store/feature-store.html), [Data & Artifacts](https://docs.mlrun.org/en/stable/concepts/data.html)
+**Tutorials:** [Quick start](https://docs.mlrun.org/en/stable/tutorials/01-mlrun-basics.html), [Feature Store](https://docs.mlrun.org/en/stable/feature-store/basic-demo.html).

 ### Develop and train models

 MLRun allows you to easily build ML pipelines that take data from various sources or the Feature Store and process it, train models at scale with multiple parameters, test models, tracks each experiments, register, version and deploy models, etc. MLRun provides scalable built-in or custom model training services, integrate with any framework and can work with 3rd party training/auto-ML services. You can also bring your own pre-trained model and use it in the pipeline.

-
+**Docs:** [Develop and train models](https://docs.mlrun.org/en/stable/development/index.html), [Model Training and Tracking](https://docs.mlrun.org/en/stable/development/model-training-tracking.html), [Batch Runs and Workflows](https://docs.mlrun.org/en/stable/concepts/runs-workflows.html)
+**Tutorials:** [Train, compare, and register models](https://docs.mlrun.org/en/stable/tutorials/02-model-training.html), [Automated ML Pipeline](https://docs.mlrun.org/en/stable/tutorials/04-pipeline.html)
+**Video:** [Train and compare models](https://youtu.be/bZgBsmLMdQo).

 ### Deploy models and applications

 MLRun rapidly deploys and manages production-grade real-time or batch application pipelines using elastic and resilient serverless functions. MLRun addresses the entire ML application: intercepting application/user requests, running data processing tasks, inferencing using one or more models, driving actions, and integrating with the application logic.

-
+**Docs:** [Deploy models and applications](https://docs.mlrun.org/en/stable/deployment/index.html), [Realtime Pipelines](https://docs.mlrun.org/en/stable/serving/serving-graph.html), [Batch Inference](https://docs.mlrun.org/en/stable/deployment/batch_inference.html)
+**Tutorials:** [Realtime Serving](https://docs.mlrun.org/en/stable/tutorials/03-model-serving.html), [Batch Inference](https://docs.mlrun.org/en/stable/tutorials/07-batch-infer.html), [Advanced Pipeline](https://docs.mlrun.org/en/stable/tutorials/07-batch-infer.html)
+**Video:** [Serving pre-trained models](https://youtu.be/OUjOus4dZfw).

-###
+### Model Monitoring

 Observability is built into the different MLRun objects (data, functions, jobs, models, pipelines, etc.), eliminating the need for complex integrations and code instrumentation. With MLRun, you can observe the application/model resource usage and model behavior (drift, performance, etc.), define custom app metrics, and trigger alerts or retraining jobs.

-
+**Docs:** [Model monitoring](https://docs.mlrun.org/en/stable/concepts/model-monitoring.html), [Model Monitoring Overview](https://docs.mlrun.org/en/stable/monitoring/model-monitoring-deployment.html)
+**Tutorials:** [Model Monitoring & Drift Detection](https://docs.mlrun.org/en/stable/tutorials/05-model-monitoring.html).


 <a id="core-components"></a>
@@ -279,18 +354,21 @@ See: **Docs:** [Monitor and alert](https://docs.mlrun.org/en/latest/monitoring/i

 <p align="center"><img src="https://github.com/mlrun/mlrun/raw/development/docs/_static/images/mlops-core.png" alt="mlrun-core" width="800"/></p><br>

+
 MLRun includes the following major components:

-[**Project Management:**](https://docs.mlrun.org/en/
+[**Project Management:**](https://docs.mlrun.org/en/stable/projects/project.html) A service (API, SDK, DB, UI) that manages the different project assets (data, functions, jobs, workflows, secrets, etc.) and provides central control and metadata layer.
+
+[**Functions:**](https://docs.mlrun.org/en/stable/runtimes/functions.html) automatically deployed software package with one or more methods and runtime-specific attributes (such as image, libraries, command, arguments, resources, etc.).

-[**
+[**Data & Artifacts:**](https://docs.mlrun.org/en/stable/concepts/data.html) Glueless connectivity to various data sources, metadata management, catalog, and versioning for structures/unstructured artifacts.

-[**
+[**Batch Runs & Workflows:**](https://docs.mlrun.org/en/stable/concepts/runs-workflows.html) Execute one or more functions with specific parameters and collect, track, and compare all their results and artifacts.

-[**
+[**Real-Time Serving Pipeline:**](https://docs.mlrun.org/en/stable/serving/serving-graph.html) Rapid deployment of scalable data and ML pipelines using real-time serverless technology, including API handling, data preparation/enrichment, model serving, ensembles, driving and measuring actions, etc.

-[**
+[**Model monitoring:**](https://docs.mlrun.org/en/stable/monitoring/index.html) monitors data, models, resources, and production components and provides a feedback loop for exploring production data, identifying drift, alerting on anomalies or data quality issues, triggering retraining jobs, measuring business impact, etc.

-[**
+[**Alerts and notifications:**](https://docs.mlrun.org/en/stable/concepts/model-monitoring.html) Use alerts to identify and inform you of possible problem situations. Use notifications to report status on runs and pipelines.

-[**
+[**Feature Store:**](https://docs.mlrun.org/en/stable/feature-store/feature-store.html) automatically collects, prepares, catalogs, and serves production data features for development (offline) and real-time (online) deployment using minimal engineering effort.
{mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/RECORD
CHANGED
@@ -2,7 +2,7 @@ mlrun/__init__.py,sha256=y08M1JcKXy5-9_5WaI9fn5aV5BxIQ5QkbduJK0OxWbA,7470
 mlrun/__main__.py,sha256=mC_Izs4kuHUHQi88QJFLN22n1kbygGM0wAirjNt7uj4,45938
 mlrun/config.py,sha256=NJG59Rl_5-mwgCdPDboRhjHD1ujW9ITYL7gtCbSMkM8,67308
 mlrun/errors.py,sha256=nY23dns_kTzbOrelJf0FyxLw5mglv7jo4Sx3efKS9Fs,7798
-mlrun/execution.py,sha256=
+mlrun/execution.py,sha256=u1nDWc7X3_B_w6-8AFuG52t11B9nd3ee5rLLGbalRDI,42843
 mlrun/features.py,sha256=m17K_3l9Jktwb9dOwlHLTAPTlemsWrRF7dJhXUX0iJU,15429
 mlrun/k8s_utils.py,sha256=mRQMs6NzPq36vx1n5_2BfFapXysc8wv3NcrZ77_2ANA,8949
 mlrun/lists.py,sha256=3PqBdcajdwhTe1XuFsAaHTuFVM2kjwepf31qqE82apg,8384
@@ -33,7 +33,7 @@ mlrun/common/formatters/feature_set.py,sha256=lH5RL9Mo6weRexHrruUnmL1qqv_mZocBOQ
 mlrun/common/formatters/function.py,sha256=fGa5m5aI_XvQdvrUr73dmUwrEJrE_8wM4_P4q8RgBTg,1477
 mlrun/common/formatters/pipeline.py,sha256=hGUV_3wcTEMa-JouspbjgJ1JGKa2Wc5cXSaH2XhOdMc,1763
 mlrun/common/formatters/project.py,sha256=rdGf7fq_CfwFwd8iKWl8sW-tqTJilK3gJtV5oLdaY-M,1756
-mlrun/common/formatters/run.py,sha256=
+mlrun/common/formatters/run.py,sha256=Gcf9lVDqxPMNfWcPX0RJasjTC_N_U0yTBkQ02jOPJ7A,1062
 mlrun/common/model_monitoring/__init__.py,sha256=x0EMEvxVjHsm858J1t6IEA9dtKTdFpJ9sKhss10ld8A,721
 mlrun/common/model_monitoring/helpers.py,sha256=1CpxIDQPumFnpUB1eqcvCpLlyPFVeW2sL6prM-N5A1A,4405
 mlrun/common/runtimes/constants.py,sha256=Rl0Sd8n_L7Imo-uF1LL9CJ5Szi0W1gUm36yrF8PXfSc,10989
@@ -41,7 +41,7 @@ mlrun/common/schemas/__init__.py,sha256=QZMyVHjIoa88JmyVy45JGkNGz5K39XX7A72TUnXr
 mlrun/common/schemas/alert.py,sha256=qWYCISNYMdkgAARVQNxshVr9d-s8LGscfLKpczkTBms,6749
 mlrun/common/schemas/api_gateway.py,sha256=9ilorgLOiWxFZbv89-dbPNfVdaChlGOIdC4SLTxQwNI,7118
 mlrun/common/schemas/artifact.py,sha256=V3ngobnzI1v2eoOroWBEedjAZu0ntCSIQ-LzsOK1Z9k,3570
-mlrun/common/schemas/auth.py,sha256=
+mlrun/common/schemas/auth.py,sha256=7XpEXICjDhHHkAppOp0mHvEtCwG68L3mhgSHPqqTBMk,6584
 mlrun/common/schemas/background_task.py,sha256=2qZxib2qrF_nPZj0ncitCG-2jxz2hg1qj0hFc8eswWQ,1707
 mlrun/common/schemas/client_spec.py,sha256=wqzQ5R4Zc7FL-8lV_BRN6nLrD0jK1kon05-JQ3fy2KY,2892
 mlrun/common/schemas/clusterization_spec.py,sha256=aeaFJZms7r7h2HDv6ML_GDAT6gboW-PxBbc3GKPalGk,888
@@ -67,13 +67,13 @@ mlrun/common/schemas/runtime_resource.py,sha256=2rSuYL-9JkESSomlnU91mYDbfV-IkqZe
 mlrun/common/schemas/schedule.py,sha256=nD9kxH2KjXkbGZPNfzVNlNSxbyFZmZUlwtT04_z2xCw,4289
 mlrun/common/schemas/secret.py,sha256=51tCN1F8DFTq4y_XdHIMDy3I1TnMEBX8kO8BHKavYF4,1484
 mlrun/common/schemas/tag.py,sha256=OAn9Qt6z8ibqw8uU8WQSvuwY8irUv45Dhx2Ko5FzUss,884
-mlrun/common/schemas/workflow.py,sha256=
+mlrun/common/schemas/workflow.py,sha256=K5kZdbdKMg21pqwJyTRn41p3Ws220Sjhn0Xl4Z5iDRg,2063
 mlrun/common/schemas/model_monitoring/__init__.py,sha256=q2icasMdgI7OG-p5eVwCu6sBuPrBMpRxByC6rxYk0DM,1813
 mlrun/common/schemas/model_monitoring/constants.py,sha256=Wha21Iev3Nr9ugB1Ms_wrmcY42YzWTQqLKPYZD2dRHA,9896
 mlrun/common/schemas/model_monitoring/grafana.py,sha256=SG13MFUUz_tk6-mWeSx17qcdEW4ekicxqNtnMSwRTCY,1559
 mlrun/common/schemas/model_monitoring/model_endpoints.py,sha256=5vvjNX1bV98VSGdT4jwHr5ArKC9v_c1iHlaTf82fSUY,13198
 mlrun/data_types/__init__.py,sha256=EkxfkFoHb91zz3Aymq-KZfCHlPMzEc3bBqgzPUwmHWY,1087
-mlrun/data_types/data_types.py,sha256=
+mlrun/data_types/data_types.py,sha256=uB9qJusSvPRK2PTvrFBXrS5jcDXMuwqXokJGToDg4VA,4953
 mlrun/data_types/infer.py,sha256=z2EbSpR6xWEE5-HRUtDZkapHQld3xMbzXtTX83K-690,6134
 mlrun/data_types/spark.py,sha256=xfcr6lcaLcHepnrHavx_vacMJK7BC8FWsUKjwrjjn6w,9509
 mlrun/data_types/to_pandas.py,sha256=-ZbJBg00x4xxyqqqu3AVbEh-HaO2--DrChyPuedRhHA,11215
@@ -119,7 +119,7 @@ mlrun/feature_store/retrieval/base.py,sha256=zgDsRsYQz8eqReKBEeTP0O4UoLoVYjWpO1o
 mlrun/feature_store/retrieval/dask_merger.py,sha256=t60xciYp6StUQLEyFyI4JK5NpWkdBy2MGCs6beimaWU,5575
 mlrun/feature_store/retrieval/job.py,sha256=xNIe3fAZ-wQ_sVLG2iTMLrnWSRIJ3EbDR10mnUUiSKE,8593
 mlrun/feature_store/retrieval/local_merger.py,sha256=jM-8ta44PeNUc1cKMPs-TxrO9t8pXbwu_Tw8MZrLxUY,4513
-mlrun/feature_store/retrieval/spark_merger.py,sha256=
+mlrun/feature_store/retrieval/spark_merger.py,sha256=XTMK40Y0bUli1Z9KwtYmMSQ8a4WOHEHzIq9uzk1mfc4,10548
 mlrun/feature_store/retrieval/storey_merger.py,sha256=5YM0UPrLjGOobulHkowRO-1LuvFD2cm_0GxcpnTdu0I,6314
 mlrun/frameworks/__init__.py,sha256=qRHe_nUfxpoLaSASAkIxcW6IyunMtxq5LXhjzZMO_1E,743
 mlrun/frameworks/parallel_coordinates.py,sha256=XY2C1Q29VWxcWIsIhcluUivpEHglr8PcZHCMs2MH4GM,11485
@@ -245,7 +245,7 @@ mlrun/model_monitoring/db/tsdb/helpers.py,sha256=0oUXc4aUkYtP2SGP6jTb3uPPKImIUsV
 mlrun/model_monitoring/db/tsdb/tdengine/__init__.py,sha256=vgBdsKaXUURKqIf3M0y4sRatmSVA4CQiJs7J5dcVBkQ,620
 mlrun/model_monitoring/db/tsdb/tdengine/schemas.py,sha256=7yZFn42sF597TBumVM-xhh1bjIQCbIo6qIvMK5WpWO0,10503
 mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py,sha256=Hb0vcCBP-o0ET78mU4P32fnhUL65QZv-pMuv2lnCby4,1586
-mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=
+mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=w9BzsqMWGvc3knMm6J0B6jeDAo4iWLKm-66QKIf3JaQ,19942
 mlrun/model_monitoring/db/tsdb/v3io/__init__.py,sha256=aL3bfmQsUQ-sbvKGdNihFj8gLCK3mSys0qDcXtYOwgc,616
 mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py,sha256=mbmhN4f_F58ptVjhwoMF6ifZSdnZWhK7x8eNsWS39IA,6217
 mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py,sha256=1H-IBXPNJPRAaxDMGWpUU25QqfR87LpZbJ03vaJkICs,32858
@@ -271,8 +271,8 @@ mlrun/package/utils/type_hint_utils.py,sha256=JYrek6vuN3z7e6MGUD3qBLDfQ03C4puZXN
 mlrun/platforms/__init__.py,sha256=ggSGF7inITs6S-vj9u4S9X_5psgbA0G3GVqf7zu8qYc,2406
 mlrun/platforms/iguazio.py,sha256=1h5BpdAEQJBg2vIt7ySjUADU0ip5OkaMYr0_VREi9ys,13084
 mlrun/projects/__init__.py,sha256=Lv5rfxyXJrw6WGOWJKhBz66M6t3_zsNMCfUD6waPwx4,1153
-mlrun/projects/operations.py,sha256=
-mlrun/projects/pipelines.py,sha256=
+mlrun/projects/operations.py,sha256=gtqSU9OvYOV-b681uQtWgnW7YSnX6qfa1Mt1Xm4f1ZI,19752
+mlrun/projects/pipelines.py,sha256=RP9lTRuRRCuA4Vf0Z2-NwuPL9XRJ28S2v6tfLzmD9B0,40874
 mlrun/projects/project.py,sha256=FjgkBBBP6geuxOGGp1Es5EFqsrs3M6PNWejBdoM08ng,190769
 mlrun/runtimes/__init__.py,sha256=egLM94cDMUyQ1GVABdFGXUQcDhU70lP3k7qSnM_UnHY,9008
 mlrun/runtimes/base.py,sha256=JXWmTIcm3b0klGUOHDlyFNa3bUgsNzQIgWhUQpSZoE0,37692
@@ -341,11 +341,11 @@ mlrun/utils/notifications/notification/ipython.py,sha256=ZtVL30B_Ha0VGoo4LxO-voT
 mlrun/utils/notifications/notification/slack.py,sha256=wqpFGr5BTvFO5KuUSzFfxsgmyU1Ohq7fbrGeNe9TXOk,7006
 mlrun/utils/notifications/notification/webhook.py,sha256=cb9w1Mc8ENfJBdgan7iiVHK9eVls4-R3tUxmXM-P-8I,4746
 mlrun/utils/version/__init__.py,sha256=7kkrB7hEZ3cLXoWj1kPoDwo4MaswsI2JVOBpbKgPAgc,614
-mlrun/utils/version/version.json,sha256=
+mlrun/utils/version/version.json,sha256=mIhH_8cnM4P-kdwi7BT1WdUxal7WaIr2QqHt5HbvdtI,89
 mlrun/utils/version/version.py,sha256=eEW0tqIAkU9Xifxv8Z9_qsYnNhn3YH7NRAfM-pPLt1g,1878
-mlrun-1.7.
-mlrun-1.7.
-mlrun-1.7.
-mlrun-1.7.
-mlrun-1.7.
-mlrun-1.7.
+mlrun-1.7.0rc51.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mlrun-1.7.0rc51.dist-info/METADATA,sha256=gB94QVVHmvDU2L8d0NTi70uF2KFrdBxG5n5qMPUuTyA,24262
+mlrun-1.7.0rc51.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+mlrun-1.7.0rc51.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
+mlrun-1.7.0rc51.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
+mlrun-1.7.0rc51.dist-info/RECORD,,
{mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/LICENSE
File without changes
{mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/WHEEL
File without changes
{mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/entry_points.txt
File without changes
{mlrun-1.7.0rc49.dist-info → mlrun-1.7.0rc51.dist-info}/top_level.txt
File without changes