mlrun 1.7.0rc56__py3-none-any.whl → 1.7.1rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mlrun/common/formatters/artifact.py +1 -0
- mlrun/common/formatters/feature_set.py +12 -1
- mlrun/config.py +54 -3
- mlrun/datastore/__init__.py +2 -2
- mlrun/db/httpdb.py +3 -1
- mlrun/features.py +2 -1
- mlrun/model_monitoring/applications/_application_steps.py +12 -10
- mlrun/model_monitoring/controller.py +18 -13
- mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +1 -1
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +27 -6
- mlrun/platforms/iguazio.py +46 -26
- mlrun/projects/__init__.py +6 -1
- mlrun/projects/pipelines.py +182 -55
- mlrun/projects/project.py +15 -0
- mlrun/runtimes/nuclio/serving.py +1 -1
- mlrun/serving/routers.py +10 -1
- mlrun/serving/states.py +4 -2
- mlrun/serving/v2_serving.py +59 -23
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/METADATA +3 -3
- {mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/RECORD +25 -25
- {mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/WHEEL +1 -1
- {mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/top_level.txt +0 -0
mlrun/common/formatters/feature_set.py
CHANGED

@@ -28,6 +28,17 @@ class FeatureSetFormat(ObjectFormat, mlrun.common.types.StrEnum):
         return {
             FeatureSetFormat.full: None,
             FeatureSetFormat.minimal: FeatureSetFormat.filter_obj_method(
-                [
+                [
+                    "metadata.name",
+                    "metadata.project",
+                    "metadata.tag",
+                    "metadata.uid",
+                    "metadata.labels",
+                    "spec.entities",
+                    "spec.description",
+                    "spec.targets",
+                    "spec.engine",  # It's not needed by the UI, but we override it anyway to storey if empty
+                    "status.state",
+                ]
             ),
         }[_format]
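The minimal format is a straight field projection. The sketch below is illustrative only, not FeatureSetFormat.filter_obj_method's actual implementation (which this diff does not show): it keeps just the dotted paths listed above and drops everything else.

def filter_paths(obj: dict, paths: list[str]) -> dict:
    """Keep only the given dotted paths from a nested dict (illustrative sketch)."""
    out: dict = {}
    for path in paths:
        src, dst = obj, out
        parts = path.split(".")
        for part in parts[:-1]:
            if part not in src:
                break
            src = src[part]
            dst = dst.setdefault(part, {})
        else:
            if parts[-1] in src:
                dst[parts[-1]] = src[parts[-1]]
    return out

minimal = filter_paths(
    {"metadata": {"name": "sales", "project": "p", "extra": "dropped"},
     "spec": {"engine": "storey"}},
    ["metadata.name", "metadata.project", "spec.engine"],
)
# -> {"metadata": {"name": "sales", "project": "p"}, "spec": {"engine": "storey"}}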
mlrun/config.py
CHANGED

@@ -49,6 +49,7 @@ _load_lock = Lock()
 _none_type = type(None)
 default_env_file = os.getenv("MLRUN_DEFAULT_ENV_FILE", "~/.mlrun.env")
 
+
 default_config = {
     "namespace": "",  # default kubernetes namespace
     "kubernetes": {
@@ -532,8 +533,55 @@ default_config = {
         },
     },
     "model_endpoint_monitoring": {
-        "
-
+        "serving_stream": {
+            "v3io": {
+                "shard_count": 2,
+                "retention_period_hours": 24,
+                "num_workers": 1,
+                "min_replicas": 2,
+                "max_replicas": 2,
+            },
+            "kafka": {
+                "partition_count": 8,
+                "replication_factor": 1,
+                "num_workers": 2,
+                "min_replicas": 1,
+                "max_replicas": 4,
+            },
+        },
+        "application_stream_args": {
+            "v3io": {
+                "shard_count": 1,
+                "retention_period_hours": 24,
+                "num_workers": 1,
+                "min_replicas": 1,
+                "max_replicas": 1,
+            },
+            "kafka": {
+                "partition_count": 1,
+                "replication_factor": 1,
+                "num_workers": 1,
+                "min_replicas": 1,
+                "max_replicas": 1,
+            },
+        },
+        "writer_stream_args": {
+            "v3io": {
+                "shard_count": 1,
+                "retention_period_hours": 24,
+                "num_workers": 1,
+                "min_replicas": 1,
+                "max_replicas": 1,
+            },
+            "kafka": {
+                "partition_count": 1,
+                # TODO: add retention period configuration
+                "replication_factor": 1,
+                "num_workers": 1,
+                "min_replicas": 1,
+                "max_replicas": 1,
+            },
+        },
         # Store prefixes are used to handle model monitoring storing policies based on project and kind, such as events,
         # stream, and endpoints.
         "store_prefixes": {
@@ -556,6 +604,10 @@ default_config = {
         "tsdb_connection": "",
         # See mlrun.common.schemas.model_monitoring.constants.StreamKind for available options
         "stream_connection": "",
+        "tdengine": {
+            "timeout": 10,
+            "retries": 1,
+        },
     },
     "secret_stores": {
         # Use only in testing scenarios (such as integration tests) to avoid using k8s for secrets (will use in-memory
@@ -746,7 +798,6 @@ default_config = {
         "request_timeout": 5,
     },
 }
-
 _is_running_as_api = None
 
 
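At runtime these defaults surface through mlrun.mlconf, which wraps default_config with attribute-style access (standard mlrun Config behavior; values shown are the defaults added above):

import mlrun

# Stream sizing for the monitoring serving stream (new "serving_stream" block)
v3io_args = mlrun.mlconf.model_endpoint_monitoring.serving_stream.v3io
print(v3io_args.shard_count, v3io_args.retention_period_hours)  # 2 24

# TDEngine client behavior (new "tdengine" block), consumed by the connector change below
tdengine_cfg = mlrun.mlconf.model_endpoint_monitoring.tdengine
print(tdengine_cfg.timeout, tdengine_cfg.retries)  # 10 1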
mlrun/datastore/__init__.py
CHANGED

@@ -131,9 +131,9 @@ class _DummyStream:
     def __init__(self, event_list=None, **kwargs):
         self.event_list = event_list or []
 
-    def push(self, data):
+    def push(self, data, **kwargs):
         if not isinstance(data, list):
             data = [data]
         for item in data:
-            logger.info(f"dummy stream got event: {item}")
+            logger.info(f"dummy stream got event: {item}, kwargs={kwargs}")
             self.event_list.append(item)
mlrun/db/httpdb.py
CHANGED

@@ -1075,7 +1075,9 @@ class HTTPRunDB(RunDBInterface):
         category: Union[str, mlrun.common.schemas.ArtifactCategories] = None,
         tree: str = None,
         producer_uri: str = None,
-        format_:
+        format_: Optional[
+            mlrun.common.formatters.ArtifactFormat
+        ] = mlrun.common.formatters.ArtifactFormat.full,
         limit: int = None,
     ) -> ArtifactList:
         """List artifacts filtered by various parameters.
mlrun/features.py
CHANGED

@@ -100,7 +100,8 @@ class Feature(ModelObj):
         :param name: name of the feature
         :param validator: feature validation policy
         :param default: default value
-        :param labels: a set of key/value labels (tags)
+        :param labels: a set of key/value labels (tags). Labels can be used to filter featues, for example,
+                       in the UI Feature store page.
         """
         self.name = name or ""
         if isinstance(value_type, ValueType):
mlrun/model_monitoring/applications/_application_steps.py
CHANGED

@@ -162,10 +162,17 @@ class _ApplicationErrorHandler(StepToDict):
         :param event: Application event.
         """
 
-
-
-
-
+        error_data = {
+            "Endpoint ID": event.body.endpoint_id,
+            "Application Class": event.body.application_name,
+            "Error": "".join(
+                traceback.format_exception(None, event.error, event.error.__traceback__)
+            ),
+            "Timestamp": event.timestamp,
+        }
+        logger.error("Error in application step", **error_data)
+
+        error_data["Error"] = event.error
 
         event_data = alert_objects.Event(
             kind=alert_objects.EventKind.MM_APP_FAILED,
@@ -174,12 +181,7 @@ class _ApplicationErrorHandler(StepToDict):
                 project=self.project,
                 ids=[f"{self.project}_{event.body.application_name}"],
             ),
-            value_dict=
-                "Error": event.error,
-                "Timestamp": event.timestamp,
-                "Application Class": event.body.application_name,
-                "Endpoint ID": event.body.endpoint_id,
-            },
+            value_dict=error_data,
         )
 
         mlrun.get_run_db().generate_event(
mlrun/model_monitoring/controller.py
CHANGED

@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import concurrent.futures
 import datetime
 import json
@@ -25,7 +24,9 @@ import nuclio
 import mlrun
 import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.data_types.infer
+import mlrun.feature_store as fstore
 import mlrun.model_monitoring.db.stores
+from mlrun.config import config as mlconf
 from mlrun.datastore import get_stream_pusher
 from mlrun.errors import err_to_str
 from mlrun.model_monitoring.helpers import (
@@ -286,9 +287,9 @@ class MonitoringApplicationController:
         )
 
         self.model_monitoring_access_key = self._get_model_monitoring_access_key()
-        self.
-
-
+        self.storage_options = None
+        if mlconf.artifact_path.startswith("s3://"):
+            self.storage_options = mlrun.mlconf.get_s3_storage_options()
 
     @staticmethod
     def _get_model_monitoring_access_key() -> Optional[str]:
@@ -375,7 +376,7 @@ class MonitoringApplicationController:
                 batch_window_generator=self._batch_window_generator,
                 project=self.project,
                 model_monitoring_access_key=self.model_monitoring_access_key,
-
+                storage_options=self.storage_options,
             )
 
     @classmethod
@@ -386,7 +387,7 @@ class MonitoringApplicationController:
         batch_window_generator: _BatchWindowGenerator,
         project: str,
         model_monitoring_access_key: str,
-
+        storage_options: Optional[dict] = None,
     ) -> None:
         """
         Process a model endpoint and trigger the monitoring applications. This function running on different process
@@ -398,11 +399,13 @@ class MonitoringApplicationController:
         :param batch_window_generator: (_BatchWindowGenerator) An object that generates _BatchWindow objects.
         :param project: (str) Project name.
         :param model_monitoring_access_key: (str) Access key to apply the model monitoring process.
-        :param
+        :param storage_options: (dict) Storage options for reading the infer parquet files.
         """
         endpoint_id = endpoint[mm_constants.EventFieldType.UID]
-        # if false the endpoint represent batch infer step.
         has_stream = endpoint[mm_constants.EventFieldType.STREAM_PATH] != ""
+        m_fs = fstore.get_feature_set(
+            endpoint[mm_constants.EventFieldType.FEATURE_SET_URI]
+        )
         try:
             for application in applications_names:
                 batch_window = batch_window_generator.get_batch_window(
@@ -415,12 +418,13 @@ class MonitoringApplicationController:
                 )
 
                 for start_infer_time, end_infer_time in batch_window.get_intervals():
-
-
-
+                    df = m_fs.to_dataframe(
+                        start_time=start_infer_time,
+                        end_time=end_infer_time,
+                        time_column=mm_constants.EventFieldType.TIMESTAMP,
+                        storage_options=storage_options,
                     )
-                    if
+                    if len(df) == 0:
                         logger.info(
                             "No data found for the given interval",
                             start=start_infer_time,
@@ -442,6 +446,7 @@ class MonitoringApplicationController:
                     applications_names=[application],
                     model_monitoring_access_key=model_monitoring_access_key,
                 )
+
         except Exception:
             logger.exception(
                 "Encountered an exception",
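The net effect: when the artifact path lives on S3, the controller forwards fsspec-style credentials to the feature-set read. A hedged sketch of what that amounts to (the exact dict mlrun.mlconf.get_s3_storage_options() builds is not shown in this diff; the key names below follow the common fsspec convention and are assumptions):

import os

storage_options = {
    "key": os.environ.get("AWS_ACCESS_KEY_ID"),         # assumed key name
    "secret": os.environ.get("AWS_SECRET_ACCESS_KEY"),  # assumed key name
}

# The per-endpoint worker then reads each monitored window with these options:
# df = m_fs.to_dataframe(
#     start_time=start_infer_time,
#     end_time=end_infer_time,
#     time_column="timestamp",
#     storage_options=storage_options,
# )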
mlrun/model_monitoring/db/tsdb/tdengine/schemas.py
CHANGED

@@ -159,7 +159,7 @@ class TDEngineSchema
             raise mlrun.errors.MLRunInvalidArgumentError(
                 f"values must contain at least one tag: {self.tags.keys()}"
             )
-        return f"SELECT tbname FROM {self.database}.{self.super_table} WHERE {values};"
+        return f"SELECT DISTINCT tbname FROM {self.database}.{self.super_table} WHERE {values};"
 
     @staticmethod
     def _get_records_query(
mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py
CHANGED

@@ -56,6 +56,9 @@ class TDEngineConnector(TSDBConnector):
         self._connection = None
         self._init_super_tables()
 
+        self._timeout = mlrun.mlconf.model_endpoint_monitoring.tdengine.timeout
+        self._retries = mlrun.mlconf.model_endpoint_monitoring.tdengine.retries
+
     @property
     def connection(self) -> TDEngineConnection:
         if not self._connection:
@@ -66,7 +69,11 @@ class TDEngineConnector(TSDBConnector):
         """Establish a connection to the TSDB server."""
         logger.debug("Creating a new connection to TDEngine", project=self.project)
         conn = TDEngineConnection(self._tdengine_connection_string)
-        conn.run(
+        conn.run(
+            statements=f"CREATE DATABASE IF NOT EXISTS {self.database}",
+            timeout=self._timeout,
+            retries=self._retries,
+        )
         conn.prefix_statements = [f"USE {self.database}"]
         logger.debug("Connected to TDEngine", project=self.project)
         return conn
@@ -89,7 +96,11 @@ class TDEngineConnector(TSDBConnector):
         """Create TDEngine supertables."""
         for table in self.tables:
             create_table_query = self.tables[table]._create_super_table_query()
-            self.connection.run(
+            self.connection.run(
+                statements=create_table_query,
+                timeout=self._timeout,
+                retries=self._retries,
+            )
 
     def write_application_event(
         self,
@@ -145,7 +156,9 @@ class TDEngineConnector(TSDBConnector):
             statements=[
                 create_table_sql,
                 insert_statement,
-            ]
+            ],
+            timeout=self._timeout,
+            retries=self._retries,
         )
 
     @staticmethod
@@ -211,13 +224,19 @@ class TDEngineConnector(TSDBConnector):
         get_subtable_names_query = self.tables[table]._get_subtables_query(
            values={mm_schemas.EventFieldType.PROJECT: self.project}
         )
-        subtables = self.connection.run(
+        subtables = self.connection.run(
+            query=get_subtable_names_query,
+            timeout=self._timeout,
+            retries=self._retries,
+        ).data
         drop_statements = []
         for subtable in subtables:
             drop_statements.append(
                 self.tables[table]._drop_subtable_query(subtable=subtable[0])
             )
-        self.connection.run(
+        self.connection.run(
+            statements=drop_statements, timeout=self._timeout, retries=self._retries
+        )
         logger.debug(
             "Deleted all project resources using the TDEngine connector",
             project=self.project,
@@ -291,7 +310,9 @@ class TDEngineConnector(TSDBConnector):
         )
         logger.debug("Querying TDEngine", query=full_query)
         try:
-            query_result = self.connection.run(
+            query_result = self.connection.run(
+                query=full_query, timeout=self._timeout, retries=self._retries
+            )
         except taosws.QueryError as e:
             raise mlrun.errors.MLRunInvalidArgumentError(
                 f"Failed to query table {table} in database {self.database}, {str(e)}"
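Every TDEngine statement and query now carries the configurable timeout and retry count. TDEngineConnection.run's internals are not part of this diff; the helper below is a generic sketch of the retry-on-failure pattern such a call implies, not mlrun's implementation:

import time

def run_with_retries(fn, retries: int = 1, backoff_seconds: float = 0.5):
    """Call fn(); on failure, retry up to `retries` more times with linear backoff."""
    last_exc = None
    for attempt in range(retries + 1):
        try:
            return fn()
        except Exception as exc:  # production code would catch narrower errors
            last_exc = exc
            time.sleep(backoff_seconds * (attempt + 1))
    raise last_exc

# Usage sketch mirroring the connector calls above:
# result = run_with_retries(lambda: conn.run(query=full_query, timeout=10), retries=1)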
mlrun/platforms/iguazio.py
CHANGED

@@ -97,34 +97,43 @@ class OutputStream:
 
         self._v3io_client = v3io.dataplane.Client(**v3io_client_kwargs)
         self._container, self._stream_path = split_path(stream_path)
+        self._shards = shards
+        self._retention_in_hours = retention_in_hours
+        self._create = create
+        self._endpoint = endpoint
         self._mock = mock
         self._mock_queue = []
 
-
-
-
+    def create_stream(self):
+        # this import creates an import loop via the utils module, so putting it in execution path
+        from mlrun.utils.helpers import logger
+
+        logger.debug(
+            "Creating output stream",
+            endpoint=self._endpoint,
+            container=self._container,
+            stream_path=self._stream_path,
+            shards=self._shards,
+            retention_in_hours=self._retention_in_hours,
+        )
+        response = self._v3io_client.stream.create(
+            container=self._container,
+            stream_path=self._stream_path,
+            shard_count=self._shards or 1,
+            retention_period_hours=self._retention_in_hours or 24,
+            raise_for_status=v3io.dataplane.RaiseForStatus.never,
+        )
+        if not (response.status_code == 400 and "ResourceInUse" in str(response.body)):
+            response.raise_for_status([409, 204])
 
-
-
-
-
-
-
-
-        )
-        response = self._v3io_client.stream.create(
-            container=self._container,
-            stream_path=self._stream_path,
-            shard_count=shards or 1,
-            retention_period_hours=retention_in_hours or 24,
-            raise_for_status=v3io.dataplane.RaiseForStatus.never,
-        )
-        if not (
-            response.status_code == 400 and "ResourceInUse" in str(response.body)
-        ):
-            response.raise_for_status([409, 204])
+    def _lazy_init(self):
+        if self._create and not self._mock:
+            self._create = False
+            self.create_stream()
+
+    def push(self, data, partition_key=None):
+        self._lazy_init()
 
-    def push(self, data):
         def dump_record(rec):
             if not isinstance(rec, (str, bytes)):
                 return dict_to_json(rec)
@@ -132,7 +141,14 @@ class OutputStream:
 
         if not isinstance(data, list):
             data = [data]
-
+
+        records = []
+        for rec in data:
+            record = {"data": dump_record(rec)}
+            if partition_key is not None:
+                record["partition_key"] = partition_key
+            records.append(record)
+
         if self._mock:
             # for mock testing
             self._mock_queue.extend(records)
@@ -205,7 +221,7 @@ class KafkaOutputStream:
 
         self._initialized = True
 
-    def push(self, data):
+    def push(self, data, partition_key=None):
        self._lazy_init()
 
         def dump_record(rec):
@@ -226,7 +242,11 @@ class KafkaOutputStream:
         else:
             for record in data:
                 serialized_record = dump_record(record)
-
+                if isinstance(partition_key, str):
+                    partition_key = partition_key.encode("UTF-8")
+                self._kafka_producer.send(
+                    self._topic, serialized_record, key=partition_key
+                )
 
 
 class V3ioStreamClient:
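The restructuring splits eager stream creation out of __init__: create_stream() does the v3io call, _lazy_init() guarantees it runs at most once, and push() now accepts a partition key. A minimal stand-in class (not mlrun's OutputStream) showing the control flow:

class LazyStream:
    """Minimal stand-in mirroring the pattern above (not mlrun's class)."""

    def __init__(self, create=True, mock=False):
        self._create = create
        self._mock = mock
        self.created = 0

    def create_stream(self):
        self.created += 1  # in mlrun this issues the v3io stream.create call

    def _lazy_init(self):
        if self._create and not self._mock:
            self._create = False
            self.create_stream()

    def push(self, data, partition_key=None):
        self._lazy_init()
        record = {"data": data}
        if partition_key is not None:
            record["partition_key"] = partition_key
        return record

s = LazyStream()
s.push({"x": 1}, partition_key="endpoint-uid")  # creates the stream once
s.push({"x": 2})                                # no second create
assert s.created == 1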
mlrun/projects/__init__.py
CHANGED

@@ -27,7 +27,12 @@ __all__ = [
 ]
 
 from .operations import build_function, deploy_function, run_function  # noqa
-from .pipelines import
+from .pipelines import (
+    import_remote_project,
+    load_and_run_workflow,
+    load_and_run,
+    pipeline_context,
+)  # noqa
 from .project import (
     MlrunProject,
     ProjectMetadata,
mlrun/projects/pipelines.py
CHANGED

@@ -984,14 +984,23 @@ def github_webhook(request):
     return {"msg": "pushed"}
 
 
-def load_and_run(
+def load_and_run(*args, **kwargs):
+    """
+    This function serves as an alias to `load_and_run_workflow`,
+    allowing to continue using `load_and_run` without modifying existing workflows or exported runs.
+    This approach ensures backward compatibility,
+    while directing all new calls to the updated `load_and_run_workflow` function.
+    """
+    load_and_run_workflow(kwargs.pop("load_only", None))
+
+
+def load_and_run_workflow(
     context: mlrun.execution.MLClientCtx,
     url: str = None,
     project_name: str = "",
     init_git: bool = None,
     subpath: str = None,
     clone: bool = False,
-    save: bool = True,
     workflow_name: str = None,
     workflow_path: str = None,
     workflow_arguments: dict[str, typing.Any] = None,
@@ -1004,14 +1013,12 @@ def load_and_run(
     local: bool = None,
     schedule: typing.Union[str, mlrun.common.schemas.ScheduleCronTrigger] = None,
     cleanup_ttl: int = None,
-    load_only: bool = False,
     wait_for_completion: bool = False,
     project_context: str = None,
 ):
     """
     Auxiliary function that the RemoteRunner run once or run every schedule.
     This function loads a project from a given remote source and then runs the workflow.
-
     :param context: mlrun context.
     :param url: remote url that represents the project's source.
         See 'mlrun.load_project()' for details
@@ -1019,7 +1026,6 @@ def load_and_run(
     :param init_git: if True, will git init the context dir
     :param subpath: project subpath (within the archive)
     :param clone: if True, always clone (delete any existing content)
-    :param save: whether to save the created project and artifact in the DB
     :param workflow_name: name of the workflow
     :param workflow_path: url to a workflow file, if not a project workflow
     :param workflow_arguments: kubeflow pipelines arguments (parameters)
@@ -1035,48 +1041,31 @@ def load_and_run(
     :param schedule: ScheduleCronTrigger class instance or a standard crontab expression string
     :param cleanup_ttl: pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the
                         workflow and all its resources are deleted)
-    :param load_only: for just loading the project, inner use.
     :param wait_for_completion: wait for workflow completion before returning
     :param project_context: project context path (used for loading the project)
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            ["slack"]
-        )
-        url = get_ui_url(project_name, context.uid)
-        link = f"<{url}|*view workflow job details*>"
-        message = (
-            f":x: Failed to run scheduled workflow {workflow_name} in Project {project_name} !\n"
-            f"error: ```{error}```\n{link}"
-        )
-        # Sending Slack Notification without losing the original error:
-        try:
-            notification_pusher.push(
-                message=message,
-                severity=mlrun.common.schemas.NotificationSeverity.ERROR,
-            )
-
-        except Exception as exc:
-            logger.error("Failed to send slack notification", exc=err_to_str(exc))
-
-            raise error
-
-    context.logger.info(f"Loaded project {project.name} successfully")
+    project_context = project_context or f"./{project_name}"
+
+    # Load the project to fetch files which the runner needs, such as remote source files
+    pull_remote_project_files(
+        context=context,
+        project_context=project_context,
+        url=url,
+        project_name=project_name,
+        init_git=init_git,
+        subpath=subpath,
+        clone=clone,
+        schedule=schedule,
+        workflow_name=workflow_name,
+    )
 
-
-
+    # Retrieve the project object:
+    # - If the project exists in the MLRun database, it will be loaded from there.
+    # - If it doesn't exist in the database, it will be created from the previously loaded local directory.
+    project = mlrun.get_or_create_project(
+        context=project_context or f"./{project_name}",
+        name=project_name,
+    )
 
     # extract "start" notification if exists
     start_notifications = [
@@ -1109,18 +1098,156 @@ def load_and_run(
         raise RuntimeError(f"Workflow {workflow_log_message} failed") from run.exc
 
     if wait_for_completion:
+        handle_workflow_completion(
+            run=run,
+            project=project,
+            context=context,
+            workflow_log_message=workflow_log_message,
+        )
+
+
+def pull_remote_project_files(
+    context: mlrun.execution.MLClientCtx,
+    project_context: str,
+    url: str,
+    project_name: str,
+    init_git: typing.Optional[bool],
+    subpath: typing.Optional[str],
+    clone: bool,
+    schedule: typing.Optional[
+        typing.Union[str, mlrun.common.schemas.ScheduleCronTrigger]
+    ],
+    workflow_name: typing.Optional[str],
+) -> None:
+    """
+    Load the project to clone remote files if they exist.
+    If an exception occurs during project loading, send a notification if the workflow is scheduled.
+
+    :param context: MLRun execution context.
+    :param project_context: Path to the project context.
+    :param url: URL of the project repository.
+    :param project_name: Name of the project.
+    :param init_git: Initialize a git repository.
+    :param subpath: Project subpath within the repository.
+    :param clone: Whether to clone the repository.
+    :param schedule: Schedule for running the workflow.
+    :param workflow_name: Name of the workflow to run.
+    """
+    try:
+        # Load the project to clone remote files if they exist.
+        # Using save=False to avoid overriding changes from the database if it already exists.
+        mlrun.load_project(
+            context=project_context,
+            url=url,
+            name=project_name,
+            init_git=init_git,
+            subpath=subpath,
+            clone=clone,
+            save=False,
+        )
+    except Exception as error:
+        notify_scheduled_workflow_failure(
+            schedule=schedule,
+            project_name=project_name,
+            workflow_name=workflow_name,
+            error=error,
+            context_uid=context.uid,
+        )
+        raise error
+
+
+def notify_scheduled_workflow_failure(
+    schedule,
+    project_name: str,
+    workflow_name: str,
+    error: Exception,
+    context_uid: str,
+) -> None:
+    if schedule:
+        notification_pusher = mlrun.utils.notifications.CustomNotificationPusher(
+            ["slack"]
+        )
+        url = get_ui_url(project_name, context_uid)
+        link = f"<{url}|*view workflow job details*>"
+        message = (
+            f":x: Failed to run scheduled workflow {workflow_name} "
+            f"in Project {project_name}!\n"
+            f"Error: ```{err_to_str(error)}```\n{link}"
+        )
+        # Sending Slack Notification without losing the original error:
         try:
-
-
-
-            "Failed waiting for workflow completion",
-            workflow=workflow_log_message,
-            exc=err_to_str(exc),
+            notification_pusher.push(
+                message=message,
+                severity=mlrun.common.schemas.NotificationSeverity.ERROR,
             )
 
-
-
-
-
-
-
+        except Exception as exc:
+            logger.error("Failed to send slack notification", exc=err_to_str(exc))
+
+
+def handle_workflow_completion(
+    run: _PipelineRunStatus,
+    project,
+    context: mlrun.execution.MLClientCtx,
+    workflow_log_message: str,
+) -> None:
+    """
+    Handle workflow completion by waiting for it to finish and logging the final state.
+
+    :param run: Run object containing workflow execution details.
+    :param project: MLRun project object.
+    :param context: MLRun execution context.
+    :param workflow_log_message: Message used for logging.
+    """
+    try:
+        run.wait_for_completion()
+    except Exception as exc:
+        mlrun.utils.logger.error(
+            "Failed waiting for workflow completion",
+            workflow=workflow_log_message,
+            exc=err_to_str(exc),
+        )
+
+    pipeline_state, _, _ = project.get_run_status(run)
+    context.log_result(key="workflow_state", value=pipeline_state, commit=True)
+    if pipeline_state != mlrun_pipelines.common.models.RunStatuses.succeeded:
+        raise RuntimeError(
+            f"Workflow {workflow_log_message} failed, state={pipeline_state}"
+        )
+
+
+def import_remote_project(
+    context: mlrun.execution.MLClientCtx,
+    url: str = None,
+    project_name: str = "",
+    init_git: bool = None,
+    subpath: str = None,
+    clone: bool = False,
+    save: bool = True,
+    project_context: str = None,
+):
+    """
+    This function loads a project from a given remote source.
+
+    :param context: mlrun context.
+    :param url: remote url that represents the project's source.
+        See 'mlrun.load_project()' for details
+    :param project_name: project name
+    :param init_git: if True, will git init the context dir
+    :param subpath: project subpath (within the archive)
+    :param clone: if True, always clone (delete any existing content)
+    :param save: whether to save the created project and artifact in the DB
+    :param project_context: project context path (used for loading the project)
+    """
+    project = mlrun.load_project(
+        context=project_context or f"./{project_name}",
+        url=url,
+        name=project_name,
+        init_git=init_git,
+        subpath=subpath,
+        clone=clone,
+        save=save,
+        sync_functions=True,
+    )
+
+    context.logger.info(f"Loaded project {project.name} successfully")
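A hedged sketch of how callers line up with the split entry points after this change (URLs and names below are hypothetical; the keyword signatures come from the definitions above):

import mlrun

# Existing exported runs keep invoking the backward-compatible alias:
#   mlrun.projects.load_and_run(...)
# New code can call the explicit functions directly, e.g. inside a runner job:

def run_workflow(context):
    mlrun.projects.load_and_run_workflow(
        context=context,
        url="git://github.com/org/repo.git#main",  # hypothetical source
        project_name="my-project",
        workflow_name="main",
        wait_for_completion=True,
    )

def only_import(context):
    mlrun.projects.import_remote_project(
        context=context,
        url="git://github.com/org/repo.git#main",  # hypothetical source
        project_name="my-project",
        save=True,
    )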
mlrun/projects/project.py
CHANGED

@@ -40,6 +40,7 @@ import requests
 import yaml
 from mlrun_pipelines.models import PipelineNodeWrapper
 
+import mlrun.common.formatters
 import mlrun.common.helpers
 import mlrun.common.runtimes.constants
 import mlrun.common.schemas.artifact
@@ -47,6 +48,7 @@ import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.db
 import mlrun.errors
 import mlrun.k8s_utils
+import mlrun.lists
 import mlrun.model_monitoring.applications as mm_app
 import mlrun.runtimes
 import mlrun.runtimes.nuclio.api_gateway
@@ -3799,6 +3801,9 @@ class MlrunProject(ModelObj):
         category: typing.Union[str, mlrun.common.schemas.ArtifactCategories] = None,
         tree: str = None,
         limit: int = None,
+        format_: Optional[
+            mlrun.common.formatters.ArtifactFormat
+        ] = mlrun.common.formatters.ArtifactFormat.full,
     ) -> mlrun.lists.ArtifactList:
         """List artifacts filtered by various parameters.
 
@@ -3829,6 +3834,7 @@ class MlrunProject(ModelObj):
         :param category: Return artifacts of the requested category.
         :param tree: Return artifacts of the requested tree.
         :param limit: Maximum number of artifacts to return.
+        :param format_: The format in which to return the artifacts. Default is 'full'.
         """
         db = mlrun.db.get_run_db(secrets=self._secrets)
         return db.list_artifacts(
@@ -3843,6 +3849,7 @@ class MlrunProject(ModelObj):
             kind=kind,
             category=category,
             tree=tree,
+            format_=format_,
             limit=limit,
         )
 
@@ -3856,6 +3863,10 @@ class MlrunProject(ModelObj):
         iter: int = None,
         best_iteration: bool = False,
         tree: str = None,
+        limit: int = None,
+        format_: Optional[
+            mlrun.common.formatters.ArtifactFormat
+        ] = mlrun.common.formatters.ArtifactFormat.full,
     ):
         """List models in project, filtered by various parameters.
 
@@ -3879,6 +3890,8 @@ class MlrunProject(ModelObj):
         artifacts generated from a hyper-param run. If only a single iteration exists, will return the artifact
         from that iteration. If using ``best_iter``, the ``iter`` parameter must not be used.
         :param tree: Return artifacts of the requested tree.
+        :param limit: Maximum number of artifacts to return.
+        :param format_: The format in which to return the artifacts. Default is 'full'.
         """
         db = mlrun.db.get_run_db(secrets=self._secrets)
         return db.list_artifacts(
@@ -3892,6 +3905,8 @@ class MlrunProject(ModelObj):
             best_iteration=best_iteration,
             kind="model",
             tree=tree,
+            limit=limit,
+            format_=format_,
         ).to_objects()
 
     def list_functions(self, name=None, tag=None, labels=None):
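Usage sketch for the new listing parameters (assuming ArtifactFormat exposes a `minimal` member analogous to FeatureSetFormat's; only `full` appears in this diff, so treat that member as a hypothesis):

import mlrun
from mlrun.common.formatters import ArtifactFormat

project = mlrun.get_or_create_project("my-project", context="./")  # hypothetical project
# Request a lighter payload when only identifying fields are needed:
artifacts = project.list_artifacts(format_=ArtifactFormat.minimal, limit=50)
models = project.list_models(format_=ArtifactFormat.minimal, limit=10)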
mlrun/runtimes/nuclio/serving.py
CHANGED

@@ -607,7 +607,7 @@ class ServingRuntime(RemoteRuntime):
     ):
         # initialize or create required streams/queues
         self.spec.graph.check_and_process_graph()
-        self.spec.graph.
+        self.spec.graph.create_queue_streams()
         functions_in_steps = self.spec.graph.list_child_functions()
         child_functions = list(self._spec.function_refs.keys())
         for function in functions_in_steps:
mlrun/serving/routers.py
CHANGED

@@ -491,6 +491,7 @@ class VotingEnsemble(ParallelRun):
         executor_type: Union[ParallelRunnerModes, str] = ParallelRunnerModes.thread,
         format_response_with_col_name_flag: bool = False,
         prediction_col_name: str = "prediction",
+        shard_by_endpoint: typing.Optional[bool] = None,
         **kwargs,
     ):
         """Voting Ensemble
@@ -580,6 +581,8 @@ class VotingEnsemble(ParallelRun):
                               `{id: <id>, model_name: <name>, outputs: {..., prediction: [<predictions>], ...}}`
                               the prediction_col_name should be `prediction`.
                               by default, `prediction`
+        :param shard_by_endpoint: whether to use the endpoint as the partition/sharding key when writing to model
+                              monitoring stream. Defaults to True.
         :param kwargs: extra arguments
         """
         super().__init__(
@@ -606,6 +609,7 @@ class VotingEnsemble(ParallelRun):
         self.prediction_col_name = prediction_col_name or "prediction"
         self.format_response_with_col_name_flag = format_response_with_col_name_flag
         self.model_endpoint_uid = None
+        self.shard_by_endpoint = shard_by_endpoint
 
     def post_init(self, mode="sync"):
         server = getattr(self.context, "_server", None) or getattr(
@@ -907,7 +911,12 @@ class VotingEnsemble(ParallelRun):
         if self._model_logger and self.log_router:
             if "id" not in request:
                 request["id"] = response.body["id"]
-
+            partition_key = (
+                self.model_endpoint_uid if self.shard_by_endpoint is not False else None
+            )
+            self._model_logger.push(
+                start, request, response.body, partition_key=partition_key
+            )
         event.body = _update_result_body(
             self._result_path, original_body, response.body if response else None
         )
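A sketch of wiring the new flag in (the router class path and set_topology usage follow mlrun's usual serving-graph pattern; treat the function name and details as illustrative):

import mlrun

serving_fn = mlrun.new_function("ensemble", kind="serving", image="mlrun/mlrun")
serving_fn.set_topology(
    "router",
    "mlrun.serving.routers.VotingEnsemble",
    name="my-ensemble",
    shard_by_endpoint=False,  # new in this release: disable endpoint-keyed sharding
)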
mlrun/serving/states.py
CHANGED

@@ -839,6 +839,8 @@ class QueueStep(BaseStep):
             retention_in_hours=self.retention_in_hours,
             **self.options,
         )
+        if hasattr(self._stream, "create_stream"):
+            self._stream.create_stream()
         self._set_error_handler()
 
     @property
@@ -1247,8 +1249,8 @@ class FlowStep(BaseStep):
                 links[next_step.function] = step
         return links
 
-    def
-    """
+    def create_queue_streams(self):
+        """create the streams used in this flow"""
         for step in self.get_children():
             if step.kind == StepKinds.queue:
                 step.init_object(self.context, None)
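The hasattr guard keeps QueueStep compatible both with stream classes that expose an explicit create_stream() (the reworked v3io OutputStream above) and with those that only initialize lazily on push (e.g. KafkaOutputStream). A tiny illustration of the duck-typed check:

class EagerStream:
    def create_stream(self):
        print("stream created up front")

class LazyOnlyStream:
    def push(self, data, **kwargs):
        print("created on first push")

for stream in (EagerStream(), LazyOnlyStream()):
    # mirrors QueueStep.init_object's new behavior
    if hasattr(stream, "create_stream"):
        stream.create_stream()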
mlrun/serving/v2_serving.py
CHANGED

@@ -39,6 +39,7 @@ class V2ModelServer(StepToDict):
         protocol=None,
         input_path: str = None,
         result_path: str = None,
+        shard_by_endpoint: Optional[bool] = None,
         **kwargs,
     ):
         """base model serving class (v2), using similar API to KFServing v2 and Triton
@@ -91,6 +92,8 @@ class V2ModelServer(StepToDict):
                             this require that the event body will behave like a dict, example:
                             event: {"x": 5} , result_path="resp" means the returned response will be written
                             to event["y"] resulting in {"x": 5, "resp": <result>}
+        :param shard_by_endpoint: whether to use the endpoint as the partition/sharding key when writing to model
+                            monitoring stream. Defaults to True.
         :param kwargs: extra arguments (can be accessed using self.get_param(key))
         """
         self.name = name
@@ -119,7 +122,9 @@ class V2ModelServer(StepToDict):
         if model:
             self.model = model
             self.ready = True
+        self._versioned_model_name = None
         self.model_endpoint_uid = None
+        self.shard_by_endpoint = shard_by_endpoint
 
     def _load_and_update_state(self):
         try:
@@ -225,6 +230,23 @@ class V2ModelServer(StepToDict):
         request = self.preprocess(event_body, op)
         return self.validate(request, op)
 
+    @property
+    def versioned_model_name(self):
+        if self._versioned_model_name:
+            return self._versioned_model_name
+
+        # Generating version model value based on the model name and model version
+        if self.model_path and self.model_path.startswith("store://"):
+            # Enrich the model server with the model artifact metadata
+            self.get_model()
+            if not self.version:
+                # Enrich the model version with the model artifact tag
+                self.version = self.model_spec.tag
+            self.labels = self.model_spec.labels
+        version = self.version or "latest"
+        self._versioned_model_name = f"{self.name}:{version}"
+        return self._versioned_model_name
+
     def do_event(self, event, *args, **kwargs):
         """main model event handler method"""
         start = now_date()
@@ -232,6 +254,11 @@ class V2ModelServer(StepToDict):
         event_body = _extract_input_data(self._input_path, event.body)
         event_id = event.id
         op = event.path.strip("/")
+
+        partition_key = (
+            self.model_endpoint_uid if self.shard_by_endpoint is not False else None
+        )
+
         if event_body and isinstance(event_body, dict):
             op = op or event_body.get("operation")
             event_id = event_body.get("id", event_id)
@@ -251,7 +278,13 @@ class V2ModelServer(StepToDict):
         except Exception as exc:
             request["id"] = event_id
             if self._model_logger:
-                self._model_logger.push(
+                self._model_logger.push(
+                    start,
+                    request,
+                    op=op,
+                    error=exc,
+                    partition_key=partition_key,
+                )
             raise exc
 
         response = {
@@ -288,7 +321,7 @@ class V2ModelServer(StepToDict):
             setattr(event, "terminated", True)
             event_body = {
                 "name": self.name,
-                "version": self.version,
+                "version": self.version or "",
                 "inputs": [],
                 "outputs": [],
             }
@@ -308,7 +341,13 @@ class V2ModelServer(StepToDict):
         except Exception as exc:
             request["id"] = event_id
             if self._model_logger:
-                self._model_logger.push(
+                self._model_logger.push(
+                    start,
+                    request,
+                    op=op,
+                    error=exc,
+                    partition_key=partition_key,
+                )
             raise exc
 
         response = {
@@ -332,12 +371,20 @@ class V2ModelServer(StepToDict):
         if self._model_logger:
             inputs, outputs = self.logged_results(request, response, op)
             if inputs is None and outputs is None:
-                self._model_logger.push(
+                self._model_logger.push(
+                    start, request, response, op, partition_key=partition_key
+                )
             else:
                 track_request = {"id": event_id, "inputs": inputs or []}
                 track_response = {"outputs": outputs or []}
                 # TODO : check dict/list
-                self._model_logger.push(
+                self._model_logger.push(
+                    start,
+                    track_request,
+                    track_response,
+                    op,
+                    partition_key=partition_key,
+                )
         event.body = _update_result_body(self._result_path, original_body, response)
         return event
 
@@ -454,7 +501,7 @@ class _ModelLogPusher:
             base_data["labels"] = self.model.labels
         return base_data
 
-    def push(self, start, request, resp=None, op=None, error=None):
+    def push(self, start, request, resp=None, op=None, error=None, partition_key=None):
         start_str = start.isoformat(sep=" ", timespec="microseconds")
         if error:
             data = self.base_data()
@@ -465,7 +512,7 @@ class _ModelLogPusher:
             if self.verbose:
                 message = f"{message}\n{traceback.format_exc()}"
             data["error"] = message
-            self.output_stream.push([data])
+            self.output_stream.push([data], partition_key=partition_key)
             return
 
         self._sample_iter = (self._sample_iter + 1) % self.stream_sample
@@ -491,7 +538,7 @@ class _ModelLogPusher:
                 "metrics",
             ]
             data["values"] = self._batch
-            self.output_stream.push([data])
+            self.output_stream.push([data], partition_key=partition_key)
         else:
             data = self.base_data()
             data["request"] = request
@@ -501,7 +548,7 @@ class _ModelLogPusher:
             data["microsec"] = microsec
             if getattr(self.model, "metrics", None):
                 data["metrics"] = self.model.metrics
-            self.output_stream.push([data])
+            self.output_stream.push([data], partition_key=partition_key)
 
 
 def _init_endpoint_record(
@@ -531,21 +578,10 @@ def _init_endpoint_record(
         logger.error("Failed to parse function URI", exc=err_to_str(e))
         return None
 
-    # Generating version model value based on the model name and model version
-    if model.model_path and model.model_path.startswith("store://"):
-        # Enrich the model server with the model artifact metadata
-        model.get_model()
-        if not model.version:
-            # Enrich the model version with the model artifact tag
-            model.version = model.model_spec.tag
-        model.labels = model.model_spec.labels
-        versioned_model_name = f"{model.name}:{model.version}"
-    else:
-        versioned_model_name = f"{model.name}:latest"
-
     # Generating model endpoint ID based on function uri and model version
     uid = mlrun.common.model_monitoring.create_model_endpoint_uid(
-        function_uri=graph_server.function_uri,
+        function_uri=graph_server.function_uri,
+        versioned_model=model.versioned_model_name,
     ).uid
 
     try:
@@ -568,7 +604,7 @@ def _init_endpoint_record(
             ),
             spec=mlrun.common.schemas.ModelEndpointSpec(
                 function_uri=graph_server.function_uri,
-                model=versioned_model_name,
+                model=model.versioned_model_name,
                 model_class=model.__class__.__name__,
                 model_uri=model.model_path,
                 stream_path=model.context.stream.stream_uri,
{mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mlrun
-Version: 1.7.0rc56
+Version: 1.7.1rc3
 Summary: Tracking and config of machine learning runs
 Home-page: https://github.com/mlrun/mlrun
 Author: Yaron Haviv
@@ -24,7 +24,7 @@ License-File: LICENSE
 Requires-Dist: urllib3 <1.27,>=1.26.9
 Requires-Dist: GitPython >=3.1.41,~=3.1
 Requires-Dist: aiohttp ~=3.9
-Requires-Dist: aiohttp-retry ~=2.8
+Requires-Dist: aiohttp-retry ~=2.8.0
 Requires-Dist: click ~=8.1
 Requires-Dist: nest-asyncio ~=1.0
 Requires-Dist: ipython ~=8.10
@@ -43,7 +43,7 @@ Requires-Dist: semver ~=3.0
 Requires-Dist: dependency-injector ~=4.41
 Requires-Dist: fsspec <2024.7,>=2023.9.2
 Requires-Dist: v3iofs ~=0.1.17
-Requires-Dist: storey ~=1.7.
+Requires-Dist: storey ~=1.7.50
 Requires-Dist: inflection ~=0.5.0
 Requires-Dist: python-dotenv ~=0.17.0
 Requires-Dist: setuptools ~=71.0
{mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/RECORD
CHANGED

@@ -1,9 +1,9 @@
 mlrun/__init__.py,sha256=y08M1JcKXy5-9_5WaI9fn5aV5BxIQ5QkbduJK0OxWbA,7470
 mlrun/__main__.py,sha256=mC_Izs4kuHUHQi88QJFLN22n1kbygGM0wAirjNt7uj4,45938
-mlrun/config.py,sha256=
+mlrun/config.py,sha256=DbVVAW1kgjtW0Bkm5bymE2_Wxd-0iHV_hoAlRDnQZDA,68743
 mlrun/errors.py,sha256=G8GP4_wb3v2UEbiAS8OlamC7nYJNzbSvQ3sViZlyYhk,8063
 mlrun/execution.py,sha256=nXvvN8euzjuxhJouJD8VxfK0keTTA6UoMrcD_17AL-4,44252
-mlrun/features.py,sha256=
+mlrun/features.py,sha256=1VlN5mdSvUrLSJJlJWk4mXp9YoNxkFTu36IGn9AbN7s,15539
 mlrun/k8s_utils.py,sha256=mRQMs6NzPq36vx1n5_2BfFapXysc8wv3NcrZ77_2ANA,8949
 mlrun/lists.py,sha256=3PqBdcajdwhTe1XuFsAaHTuFVM2kjwepf31qqE82apg,8384
 mlrun/model.py,sha256=S6CKiRrYfgVNALA9TLy4lsXZCox4FpD-TAnR5CU51cQ,82035
@@ -27,9 +27,9 @@ mlrun/common/types.py,sha256=APVFvumnHpCG-yXlt6OSioMfkyT-DADPiW3dGG3dUFQ,1057
 mlrun/common/db/__init__.py,sha256=xY3wHC4TEJgez7qtnn1pQvHosi8-5UJOCtyGBS7FcGE,571
 mlrun/common/db/sql_session.py,sha256=J6b-0xrnFb-8n_xdksPXeA8kArSMfAiSDN4n7iOhtus,2708
 mlrun/common/formatters/__init__.py,sha256=topwMC5auQYTDBq8dwa31-5e8bWvHcLYmUqyXysXVWQ,835
-mlrun/common/formatters/artifact.py,sha256=
+mlrun/common/formatters/artifact.py,sha256=_XIBWSpglDXRVflyH_xO3NSi4JvMzoy0lOC207xhvqk,1419
 mlrun/common/formatters/base.py,sha256=LHwWWnQJCmvlnOCCmG8YtJ_xzs0xBI8PujYDL5Ky9H4,4101
-mlrun/common/formatters/feature_set.py,sha256=
+mlrun/common/formatters/feature_set.py,sha256=2rSgnoHasvHUvh46oYCg59flCnNu3hTthPpKrvvywUE,1478
 mlrun/common/formatters/function.py,sha256=fGa5m5aI_XvQdvrUr73dmUwrEJrE_8wM4_P4q8RgBTg,1477
 mlrun/common/formatters/pipeline.py,sha256=hGUV_3wcTEMa-JouspbjgJ1JGKa2Wc5cXSaH2XhOdMc,1763
 mlrun/common/formatters/project.py,sha256=rdGf7fq_CfwFwd8iKWl8sW-tqTJilK3gJtV5oLdaY-M,1756
@@ -77,7 +77,7 @@ mlrun/data_types/data_types.py,sha256=uB9qJusSvPRK2PTvrFBXrS5jcDXMuwqXokJGToDg4V
 mlrun/data_types/infer.py,sha256=z2EbSpR6xWEE5-HRUtDZkapHQld3xMbzXtTX83K-690,6134
 mlrun/data_types/spark.py,sha256=xfcr6lcaLcHepnrHavx_vacMJK7BC8FWsUKjwrjjn6w,9509
 mlrun/data_types/to_pandas.py,sha256=-ZbJBg00x4xxyqqqu3AVbEh-HaO2--DrChyPuedRhHA,11215
-mlrun/datastore/__init__.py,sha256=
+mlrun/datastore/__init__.py,sha256=y2_NkHUiz9WKJ1XWeUHX-MKErwmIag6nxZ7Z06EcSk0,4180
 mlrun/datastore/alibaba_oss.py,sha256=-RMA4vCE4rar-D57Niy3tY_6bXKHLFpMp28z5YR7-jI,4888
 mlrun/datastore/azure_blob.py,sha256=9qkgrEMXGiuYYcc6b6HkuHlRHDbl0p7tIzeWxAAcEVs,12724
 mlrun/datastore/base.py,sha256=2tGtl1S59SVkk3ZaIZ_Fm2UgAdHtByXUWu3cR36aAYk,26231
@@ -105,7 +105,7 @@ mlrun/db/__init__.py,sha256=WqJ4x8lqJ7ZoKbhEyFqkYADd9P6E3citckx9e9ZLcIU,1163
 mlrun/db/auth_utils.py,sha256=hpg8D2r82oN0BWabuWN04BTNZ7jYMAF242YSUpK7LFM,5211
 mlrun/db/base.py,sha256=lUfJrCWbuRUErIrUUXAKI2sSlrwfB-dHDz-Ck_cnZHU,24297
 mlrun/db/factory.py,sha256=ibIrE5QkIIyzDU1FXKrfbc31cZiRLYKDZb8dqCpQwyU,2397
-mlrun/db/httpdb.py,sha256=
+mlrun/db/httpdb.py,sha256=VSk5lCrxBQydla9Cw4lYLA7W9o0Ge4WNfmmKFB4x3WM,184966
 mlrun/db/nopdb.py,sha256=1oCZR2EmQQDkwXUgmyI3SB76zvOwA6Ml3Lk_xvuwHfc,21620
 mlrun/feature_store/__init__.py,sha256=FhHRc8NdqL_HWpCs7A8dKruxJS5wEm55Gs3dcgBiRUg,1522
 mlrun/feature_store/api.py,sha256=SWBbFD4KU2U4TUaAbD2hRLSquFWxX46mZGCToI0GfFQ,49994
@@ -213,7 +213,7 @@ mlrun/launcher/local.py,sha256=pP9-ZrNL8OnNDEiXTAKAZQnmLpS_mCc2v-mJw329eks,11269
 mlrun/launcher/remote.py,sha256=tGICSfWtvUHeR31mbzy6gqHejmDxjPUgjtxXTWhRubg,7699
 mlrun/model_monitoring/__init__.py,sha256=dm5_j0_pwqrdzFwTaEtGnKfv2nVpNaM56nBI-oqLbNU,879
 mlrun/model_monitoring/api.py,sha256=2EHCzB_5sCDgalYPkrFbI01cSO7LVWBv9yWoooJ-a0g,28106
-mlrun/model_monitoring/controller.py,sha256=
+mlrun/model_monitoring/controller.py,sha256=m2Z2Nwqj3A3byxrV6PAbkqzT0AsNxmlNqOk61nNJxOc,20637
 mlrun/model_monitoring/features_drift_table.py,sha256=c6GpKtpOJbuT1u5uMWDL_S-6N4YPOmlktWMqPme3KFY,25308
 mlrun/model_monitoring/helpers.py,sha256=KsbSH0kEjCPajvLUpv3q5GWyvx0bZj-JkghGJlzbLZI,12757
 mlrun/model_monitoring/model_endpoint.py,sha256=7VX0cBATqLsA4sSinDzouf41ndxqh2mf5bO9BW0G5Z4,4017
@@ -221,7 +221,7 @@ mlrun/model_monitoring/stream_processing.py,sha256=0eu1Gq1Obq87LFno6eIZ55poXoFae
 mlrun/model_monitoring/tracking_policy.py,sha256=sQq956akAQpntkrJwIgFWcEq-JpyVcg0FxgNa4h3V70,5502
 mlrun/model_monitoring/writer.py,sha256=TrBwngRmdwr67De71UCcCFsJOfcqQe8jDp0vkBvGf0o,10177
 mlrun/model_monitoring/applications/__init__.py,sha256=QYvzgCutFdAkzqKPD3mvkX_3c1X4tzd-kW8ojUOE9ic,889
-mlrun/model_monitoring/applications/_application_steps.py,sha256=
+mlrun/model_monitoring/applications/_application_steps.py,sha256=FWgEldIC0Jbg0KLMBIcSNv8uULD1QZ3i7xcC4kEWmrA,7231
 mlrun/model_monitoring/applications/base.py,sha256=uzc14lFlwTJnL0p2VBCzmp-CNoHd73cK_Iz0YHC1KAs,4380
 mlrun/model_monitoring/applications/context.py,sha256=vOZ_ZgUuy5UsNe22-puJSt7TB32HiZtqBdN1hegykuQ,12436
 mlrun/model_monitoring/applications/evidently_base.py,sha256=FSzmoDZP8EiSQ3tq5RmU7kJ6edh8bWaKQh0rBORjODY,5099
@@ -243,9 +243,9 @@ mlrun/model_monitoring/db/tsdb/__init__.py,sha256=Zqh_27I2YAEHk9nl0Z6lUxP7VEfrgr
 mlrun/model_monitoring/db/tsdb/base.py,sha256=X89X763sDrShfRXE1N-p8k97E8NBs7O1QJFiO-CffLM,18583
 mlrun/model_monitoring/db/tsdb/helpers.py,sha256=0oUXc4aUkYtP2SGP6jTb3uPPKImIUsVsrb9otX9a7O4,1189
 mlrun/model_monitoring/db/tsdb/tdengine/__init__.py,sha256=vgBdsKaXUURKqIf3M0y4sRatmSVA4CQiJs7J5dcVBkQ,620
-mlrun/model_monitoring/db/tsdb/tdengine/schemas.py,sha256=
+mlrun/model_monitoring/db/tsdb/tdengine/schemas.py,sha256=UOtb-0shOyKxfYnNzI5uNM5fdI9FbbSDGGRuzvgOKO8,10560
 mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py,sha256=Hb0vcCBP-o0ET78mU4P32fnhUL65QZv-pMuv2lnCby4,1586
-mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=
+mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=ZpYqPLq8l9aRzgAZ-1uxY_T1eRfx2I2_k7mGfKR2vwI,19683
 mlrun/model_monitoring/db/tsdb/v3io/__init__.py,sha256=aL3bfmQsUQ-sbvKGdNihFj8gLCK3mSys0qDcXtYOwgc,616
 mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py,sha256=mbmhN4f_F58ptVjhwoMF6ifZSdnZWhK7x8eNsWS39IA,6217
 mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py,sha256=1H-IBXPNJPRAaxDMGWpUU25QqfR87LpZbJ03vaJkICs,32858
@@ -269,11 +269,11 @@ mlrun/package/utils/_supported_format.py,sha256=O3LPTvZ6A-nGi6mB2kTzJp2DQ-cCOgnl
 mlrun/package/utils/log_hint_utils.py,sha256=40X7oVzCiAIGsTTSON0iYNHj-_1Y4l4SDMThTA85If8,3696
 mlrun/package/utils/type_hint_utils.py,sha256=JYrek6vuN3z7e6MGUD3qBLDfQ03C4puZXNTpDSj-VrM,14695
 mlrun/platforms/__init__.py,sha256=ggSGF7inITs6S-vj9u4S9X_5psgbA0G3GVqf7zu8qYc,2406
-mlrun/platforms/iguazio.py,sha256=
-mlrun/projects/__init__.py,sha256=
+mlrun/platforms/iguazio.py,sha256=MNRzIzxcc_3wsePLjBXuKKKSaObVnnrC3ZyXgSRu8m0,13697
+mlrun/projects/__init__.py,sha256=0Krf0WIKfnZa71WthYOg0SoaTodGg3sV_hK3f_OlTPI,1220
 mlrun/projects/operations.py,sha256=gtqSU9OvYOV-b681uQtWgnW7YSnX6qfa1Mt1Xm4f1ZI,19752
-mlrun/projects/pipelines.py,sha256=
-mlrun/projects/project.py,sha256=
+mlrun/projects/pipelines.py,sha256=IE8MpuXPnXi0_izOCEC1dtpEctcdWZUyCADnMvAZH0M,45331
+mlrun/projects/project.py,sha256=UOu625oJUwJA9o--MboL19Zvqv_xDqO9oCx-0Rs_Khk,191436
 mlrun/runtimes/__init__.py,sha256=egLM94cDMUyQ1GVABdFGXUQcDhU70lP3k7qSnM_UnHY,9008
 mlrun/runtimes/base.py,sha256=JXWmTIcm3b0klGUOHDlyFNa3bUgsNzQIgWhUQpSZoE0,37692
 mlrun/runtimes/daskjob.py,sha256=Ka_xqim8LkCYjp-M_WgteJy6ZN_3qfmLLHvXs7N6pa4,19411
@@ -296,7 +296,7 @@ mlrun/runtimes/nuclio/__init__.py,sha256=gx1kizzKv8pGT5TNloN1js1hdbxqDw3rM90sLVY
 mlrun/runtimes/nuclio/api_gateway.py,sha256=oQRSOvqtODKCzT2LqlqSXZbq2vcZ7epsFZwO9jvarhc,26899
 mlrun/runtimes/nuclio/function.py,sha256=TQt6RyxK_iyzNJr2r57BRtVXuy2GMrhdeFOlFjb2AZg,52106
 mlrun/runtimes/nuclio/nuclio.py,sha256=sLK8KdGO1LbftlL3HqPZlFOFTAAuxJACZCVl1c0Ha6E,2942
-mlrun/runtimes/nuclio/serving.py,sha256=
+mlrun/runtimes/nuclio/serving.py,sha256=L1Tz5EZyo8JZmUBNmIRYL9AoWfqSm4zLQQ9DWbnlmp8,29726
 mlrun/runtimes/nuclio/application/__init__.py,sha256=rRs5vasy_G9IyoTpYIjYDafGoL6ifFBKgBtsXn31Atw,614
 mlrun/runtimes/nuclio/application/application.py,sha256=5XFIg7tgU9kKWwGdMFwB1OJpw79BWwlWUdGiHlDo4AY,29055
 mlrun/runtimes/nuclio/application/reverse_proxy.go,sha256=JIIYae6bXzCLf3jXuu49KWPQYoXr_FDQ2Rbo1OWKAd0,3150
@@ -305,13 +305,13 @@ mlrun/runtimes/sparkjob/spark3job.py,sha256=RuwO9Pk1IFaUCFz8zoYLaK3pYT7w07uAjouc
 mlrun/serving/__init__.py,sha256=-SMRV3q_5cGVPDxRslXPU0zGYZIygs0cSj7WKlOJJUc,1163
 mlrun/serving/merger.py,sha256=PXLn3A21FiLteJHaDSLm5xKNT-80eTTjfHUJnBX1gKY,6116
 mlrun/serving/remote.py,sha256=MrFByphQWmIsKXqw-MOwl2Q1hbtWReYVRKvlcKj9pfw,17980
-mlrun/serving/routers.py,sha256=
+mlrun/serving/routers.py,sha256=aJHO-063gaQ1N3vRDXQwKJ5zwy_X9q3RIq5CjsuCOG8,55832
 mlrun/serving/server.py,sha256=m1HzUDconjowDtheQ71HEKbV7e9A-TUtaCdoqxTH2Pw,22092
 mlrun/serving/serving_wrapper.py,sha256=R670-S6PX_d5ER6jiHtRvacuPyFzQH0mEf2K0sBIIOM,836
-mlrun/serving/states.py,sha256=
+mlrun/serving/states.py,sha256=uajsgqmf1qBkkm6es4hb9c1hUARKHUBDqxVmDFEbPLo,60332
 mlrun/serving/utils.py,sha256=lej7XcUPX1MmHkEOi_0KZRGSpfbmpnE0GK_Sn4zLkHY,4025
 mlrun/serving/v1_serving.py,sha256=by4myxlnwyZ0ijQ5fURilGCK1sUpdQL2Il1VR3Xqpxg,11805
-mlrun/serving/v2_serving.py,sha256
+mlrun/serving/v2_serving.py,sha256=y48sMhSmZwwHAeTaqdeaxeRag3hkZH1nDolx5CS8VbU,26379
 mlrun/track/__init__.py,sha256=LWRUHJt8JyFW17FyNPOVyWd-NXTf1iptzsK9KFj5fuY,765
 mlrun/track/tracker.py,sha256=hSi9sMxB7hhZalt6Q8GXDnK4UoCbXHzKTrpUPC9hZv4,3555
 mlrun/track/tracker_manager.py,sha256=IYBl99I62IC6VCCmG1yt6JoHNOQXa53C4DURJ2sWgio,5726
@@ -341,11 +341,11 @@ mlrun/utils/notifications/notification/ipython.py,sha256=ZtVL30B_Ha0VGoo4LxO-voT
 mlrun/utils/notifications/notification/slack.py,sha256=wqpFGr5BTvFO5KuUSzFfxsgmyU1Ohq7fbrGeNe9TXOk,7006
 mlrun/utils/notifications/notification/webhook.py,sha256=cb9w1Mc8ENfJBdgan7iiVHK9eVls4-R3tUxmXM-P-8I,4746
 mlrun/utils/version/__init__.py,sha256=7kkrB7hEZ3cLXoWj1kPoDwo4MaswsI2JVOBpbKgPAgc,614
-mlrun/utils/version/version.json,sha256=
+mlrun/utils/version/version.json,sha256=I-O3PI0BWjB7qXvPEnhVNXuOvclLGEzZuWkJnG90q0k,88
 mlrun/utils/version/version.py,sha256=eEW0tqIAkU9Xifxv8Z9_qsYnNhn3YH7NRAfM-pPLt1g,1878
-mlrun-1.7.
-mlrun-1.7.
-mlrun-1.7.
-mlrun-1.7.
-mlrun-1.7.
-mlrun-1.7.
+mlrun-1.7.1rc3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mlrun-1.7.1rc3.dist-info/METADATA,sha256=fh1xEBBL_etiOUTU93dh9sF7R9mHY7oMfs14tc2JUls,24486
+mlrun-1.7.1rc3.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+mlrun-1.7.1rc3.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
+mlrun-1.7.1rc3.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
+mlrun-1.7.1rc3.dist-info/RECORD,,
{mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/LICENSE: file without changes
{mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/entry_points.txt: file without changes
{mlrun-1.7.0rc56.dist-info → mlrun-1.7.1rc3.dist-info}/top_level.txt: file without changes