mlrun 1.7.0rc17__py3-none-any.whl → 1.7.0rc19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of mlrun has been flagged as potentially problematic.
Files changed (90)
  1. mlrun/__main__.py +5 -2
  2. mlrun/alerts/alert.py +1 -1
  3. mlrun/artifacts/manager.py +5 -1
  4. mlrun/common/constants.py +64 -3
  5. mlrun/common/formatters/__init__.py +16 -0
  6. mlrun/common/formatters/base.py +59 -0
  7. mlrun/common/formatters/function.py +41 -0
  8. mlrun/common/runtimes/constants.py +32 -4
  9. mlrun/common/schemas/__init__.py +1 -2
  10. mlrun/common/schemas/alert.py +31 -9
  11. mlrun/common/schemas/api_gateway.py +52 -0
  12. mlrun/common/schemas/client_spec.py +1 -0
  13. mlrun/common/schemas/frontend_spec.py +1 -0
  14. mlrun/common/schemas/function.py +4 -0
  15. mlrun/common/schemas/model_monitoring/__init__.py +9 -4
  16. mlrun/common/schemas/model_monitoring/constants.py +22 -8
  17. mlrun/common/schemas/model_monitoring/grafana.py +9 -5
  18. mlrun/common/schemas/model_monitoring/model_endpoints.py +17 -6
  19. mlrun/config.py +9 -2
  20. mlrun/data_types/to_pandas.py +5 -5
  21. mlrun/datastore/datastore.py +6 -2
  22. mlrun/datastore/redis.py +2 -2
  23. mlrun/datastore/s3.py +5 -0
  24. mlrun/datastore/sources.py +106 -7
  25. mlrun/datastore/store_resources.py +5 -1
  26. mlrun/datastore/targets.py +5 -4
  27. mlrun/datastore/utils.py +42 -0
  28. mlrun/db/base.py +5 -1
  29. mlrun/db/httpdb.py +22 -3
  30. mlrun/db/nopdb.py +5 -1
  31. mlrun/errors.py +6 -0
  32. mlrun/execution.py +16 -6
  33. mlrun/feature_store/ingestion.py +7 -6
  34. mlrun/feature_store/retrieval/conversion.py +5 -5
  35. mlrun/feature_store/retrieval/job.py +7 -3
  36. mlrun/feature_store/retrieval/spark_merger.py +2 -1
  37. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +2 -2
  38. mlrun/frameworks/parallel_coordinates.py +2 -1
  39. mlrun/frameworks/tf_keras/__init__.py +4 -1
  40. mlrun/launcher/client.py +4 -2
  41. mlrun/launcher/local.py +8 -2
  42. mlrun/launcher/remote.py +8 -2
  43. mlrun/model.py +5 -1
  44. mlrun/model_monitoring/db/stores/__init__.py +0 -2
  45. mlrun/model_monitoring/db/stores/base/store.py +16 -4
  46. mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +43 -21
  47. mlrun/model_monitoring/db/stores/sqldb/models/base.py +32 -2
  48. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +25 -5
  49. mlrun/model_monitoring/db/stores/sqldb/models/sqlite.py +5 -0
  50. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +235 -166
  51. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +190 -91
  52. mlrun/model_monitoring/db/tsdb/__init__.py +35 -6
  53. mlrun/model_monitoring/db/tsdb/base.py +232 -38
  54. mlrun/model_monitoring/db/tsdb/helpers.py +30 -0
  55. mlrun/model_monitoring/db/tsdb/tdengine/__init__.py +15 -0
  56. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +240 -0
  57. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +45 -0
  58. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +397 -0
  59. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +292 -104
  60. mlrun/model_monitoring/helpers.py +45 -0
  61. mlrun/model_monitoring/stream_processing.py +7 -4
  62. mlrun/model_monitoring/writer.py +50 -20
  63. mlrun/package/utils/_formatter.py +2 -2
  64. mlrun/projects/operations.py +8 -5
  65. mlrun/projects/pipelines.py +42 -15
  66. mlrun/projects/project.py +55 -14
  67. mlrun/render.py +8 -5
  68. mlrun/runtimes/base.py +2 -1
  69. mlrun/runtimes/databricks_job/databricks_wrapper.py +1 -1
  70. mlrun/runtimes/local.py +4 -1
  71. mlrun/runtimes/nuclio/api_gateway.py +32 -8
  72. mlrun/runtimes/nuclio/application/application.py +3 -3
  73. mlrun/runtimes/nuclio/function.py +1 -4
  74. mlrun/runtimes/utils.py +5 -6
  75. mlrun/serving/server.py +2 -1
  76. mlrun/utils/async_http.py +25 -5
  77. mlrun/utils/helpers.py +28 -7
  78. mlrun/utils/logger.py +28 -1
  79. mlrun/utils/notifications/notification/__init__.py +14 -9
  80. mlrun/utils/notifications/notification/slack.py +27 -7
  81. mlrun/utils/notifications/notification_pusher.py +47 -42
  82. mlrun/utils/v3io_clients.py +0 -1
  83. mlrun/utils/version/version.json +2 -2
  84. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/METADATA +9 -4
  85. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/RECORD +89 -82
  86. mlrun/model_monitoring/db/v3io_tsdb_reader.py +0 -134
  87. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/LICENSE +0 -0
  88. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/WHEEL +0 -0
  89. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/entry_points.txt +0 -0
  90. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/top_level.txt +0 -0
mlrun/model_monitoring/stream_processing.py CHANGED
@@ -136,7 +136,11 @@ class EventStreamProcessor:
         self.tsdb_batching_max_events = tsdb_batching_max_events
         self.tsdb_batching_timeout_secs = tsdb_batching_timeout_secs
 
-    def apply_monitoring_serving_graph(self, fn: mlrun.runtimes.ServingRuntime) -> None:
+    def apply_monitoring_serving_graph(
+        self,
+        fn: mlrun.runtimes.ServingRuntime,
+        tsdb_service_provider: typing.Optional[typing.Callable] = None,
+    ) -> None:
         """
         Apply monitoring serving graph to a given serving function. The following serving graph includes about 4 main
         parts that each one them includes several steps of different operations that are executed on the events from
@@ -163,6 +167,7 @@ class EventStreamProcessor:
         using CE, the parquet target path is based on the defined MLRun artifact path.
 
         :param fn: A serving function.
+        :param tsdb_service_provider: An optional callable function that provides the TSDB connection string.
         """
 
         graph = typing.cast(
@@ -322,15 +327,13 @@ class EventStreamProcessor:
 
         # TSDB branch (skip to Prometheus if in CE env)
         if not mlrun.mlconf.is_ce_mode():
-            # TSDB branch
            tsdb_connector = mlrun.model_monitoring.get_tsdb_connector(
-                project=self.project,
+                project=self.project, secret_provider=tsdb_service_provider
            )
            tsdb_connector.apply_monitoring_stream_steps(graph=graph)
 
        else:
            # Prometheus
-
            # Increase the prediction counter by 1 and update the latency value
            graph.add_step(
                "IncCounter",
mlrun/model_monitoring/writer.py CHANGED
@@ -24,12 +24,13 @@ from mlrun.common.schemas.model_monitoring.constants import (
     HistogramDataDriftApplicationConstants,
     MetricData,
     ResultData,
+    ResultKindApp,
     ResultStatusApp,
     WriterEvent,
     WriterEventKind,
 )
 from mlrun.common.schemas.notification import NotificationKind, NotificationSeverity
-from mlrun.model_monitoring.helpers import get_endpoint_record
+from mlrun.model_monitoring.helpers import get_endpoint_record, get_result_instance_fqn
 from mlrun.serving.utils import StepToDict
 from mlrun.utils import logger
 from mlrun.utils.notifications.notification_pusher import CustomNotificationPusher
@@ -101,7 +102,7 @@ class ModelMonitoringWriter(StepToDict):
 
     kind = "monitoring_application_stream_pusher"
 
-    def __init__(self, project: str) -> None:
+    def __init__(self, project: str, tsdb_secret_provider=None) -> None:
         self.project = project
         self.name = project  # required for the deployment process
 
@@ -113,32 +114,55 @@ class ModelMonitoringWriter(StepToDict):
             project=self.project
         )
         self._tsdb_connector = mlrun.model_monitoring.get_tsdb_connector(
-            project=self.project,
+            project=self.project, secret_provider=tsdb_secret_provider
         )
         self._endpoints_records = {}
 
-    @staticmethod
     def _generate_event_on_drift(
-        model_endpoint: str, drift_status: str, event_value: dict, project_name: str
+        self,
+        entity_id: str,
+        result_status: int,
+        event_value: dict,
+        project_name: str,
+        result_kind: int,
     ) -> None:
-        logger.info("Sending an alert")
+        logger.info("Sending an event")
         entity = mlrun.common.schemas.alert.EventEntities(
-            kind=alert_objects.EventEntityKind.MODEL,
+            kind=alert_objects.EventEntityKind.MODEL_ENDPOINT_RESULT,
             project=project_name,
-            ids=[model_endpoint],
+            ids=[entity_id],
         )
-        event_kind = (
-            alert_objects.EventKind.DRIFT_DETECTED
-            if drift_status == ResultStatusApp.detected.value
-            else alert_objects.EventKind.DRIFT_SUSPECTED
+
+        event_kind = self._generate_alert_event_kind(
+            result_status=result_status, result_kind=result_kind
         )
+
         event_data = mlrun.common.schemas.Event(
-            kind=event_kind, entity=entity, value_dict=event_value
+            kind=alert_objects.EventKind(value=event_kind),
+            entity=entity,
+            value_dict=event_value,
         )
         mlrun.get_run_db().generate_event(event_kind, event_data)
 
     @staticmethod
-    def _reconstruct_event(event: _RawEvent) -> tuple[_AppResultEvent, str]:
+    def _generate_alert_event_kind(
+        result_kind: int, result_status: int
+    ) -> alert_objects.EventKind:
+        """Generate the required Event Kind format for the alerting system"""
+        if result_kind == ResultKindApp.custom.value:
+            # Custom kind is represented as an anomaly detection
+            event_kind = "mm_app_anomaly"
+        else:
+            event_kind = ResultKindApp(value=result_kind).name
+
+        if result_status == ResultStatusApp.detected.value:
+            event_kind = f"{event_kind}_detected"
+        else:
+            event_kind = f"{event_kind}_suspected"
+        return alert_objects.EventKind(value=event_kind)
+
+    @staticmethod
+    def _reconstruct_event(event: _RawEvent) -> tuple[_AppResultEvent, WriterEventKind]:
         """
         Modify the raw event into the expected monitoring application event
         schema as defined in `mlrun.common.schemas.model_monitoring.constants.WriterEvent`
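
The _generate_alert_event_kind helper above derives the alert event kind from the result's kind and status, e.g. a data-drift result with a detected status becomes "data_drift_detected". A standalone sketch of the same mapping, with hypothetical enum values standing in for the real ones in mlrun.common.schemas.model_monitoring.constants:

import enum


class ResultKindApp(enum.Enum):
    # Hypothetical values for illustration; the real enum lives in
    # mlrun.common.schemas.model_monitoring.constants.
    data_drift = 0
    custom = 4


class ResultStatusApp(enum.Enum):
    no_detection = 0
    detected = 2


def generate_alert_event_kind(result_kind: int, result_status: int) -> str:
    # Mirrors the diff: custom results map to "mm_app_anomaly", other kinds
    # keep their enum name, and a status suffix is appended.
    if result_kind == ResultKindApp.custom.value:
        kind = "mm_app_anomaly"
    else:
        kind = ResultKindApp(result_kind).name
    suffix = "detected" if result_status == ResultStatusApp.detected.value else "suspected"
    return f"{kind}_{suffix}"


print(generate_alert_event_kind(0, 2))  # data_drift_detected
print(generate_alert_event_kind(4, 0))  # mm_app_anomaly_suspected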
@@ -179,12 +203,13 @@ class ModelMonitoringWriter(StepToDict):
     def do(self, event: _RawEvent) -> None:
         event, kind = self._reconstruct_event(event)
         logger.info("Starting to write event", event=event)
-
         self._tsdb_connector.write_application_event(event=event.copy(), kind=kind)
         self._app_result_store.write_application_event(event=event.copy(), kind=kind)
+
         logger.info("Completed event DB writes")
 
-        _Notifier(event=event, notification_pusher=self._custom_notifier).notify()
+        if kind == WriterEventKind.RESULT:
+            _Notifier(event=event, notification_pusher=self._custom_notifier).notify()
 
         if (
             mlrun.mlconf.alerts.mode == mlrun.common.schemas.alert.AlertsModes.enabled
@@ -208,10 +233,15 @@ class ModelMonitoringWriter(StepToDict):
                 "result_value": event[ResultData.RESULT_VALUE],
             }
             self._generate_event_on_drift(
-                event[WriterEvent.ENDPOINT_ID],
-                event[ResultData.RESULT_STATUS],
-                event_value,
-                self.project,
+                entity_id=get_result_instance_fqn(
+                    event[WriterEvent.ENDPOINT_ID],
+                    event[WriterEvent.APPLICATION_NAME],
+                    event[ResultData.RESULT_NAME],
+                ),
+                result_status=event[ResultData.RESULT_STATUS],
+                event_value=event_value,
+                project_name=self.project,
+                result_kind=event[ResultData.RESULT_KIND],
             )
 
         if (
mlrun/package/utils/_formatter.py CHANGED
@@ -142,11 +142,11 @@ class _YAMLFormatter(_Formatter):
 
         :param obj: The object to write.
         :param file_path: The file path to write to.
-        :param dump_kwargs: Additional keyword arguments to pass to the `yaml.dump` method of the formatter in use.
+        :param dump_kwargs: Additional keyword arguments to pass to the `yaml.safe_dump` method of the formatter in use.
         """
         dump_kwargs = dump_kwargs or cls.DEFAULT_DUMP_KWARGS
         with open(file_path, "w") as file:
-            yaml.dump(obj, file, **dump_kwargs)
+            yaml.safe_dump(obj, file, **dump_kwargs)
 
     @classmethod
     def read(cls, file_path: str) -> Union[list, dict]:
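
The switch from yaml.dump to yaml.safe_dump restricts serialization to plain YAML types, so arbitrary Python objects fail fast instead of being written with !!python tags that yaml.safe_load could never read back. A quick illustration of the difference:

import yaml


class Widget:
    pass


# yaml.dump serializes arbitrary Python objects using !!python tags.
print(yaml.dump(Widget()))  # !!python/object:__main__.Widget {}

# yaml.safe_dump rejects them with a RepresenterError instead.
try:
    yaml.safe_dump(Widget())
except yaml.representer.RepresenterError as err:
    print(err)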
mlrun/projects/operations.py CHANGED
@@ -18,6 +18,7 @@ from typing import Optional, Union
 from mlrun_pipelines.models import PipelineNodeWrapper
 
 import mlrun
+import mlrun.common.constants as mlrun_constants
 from mlrun.utils import hub_prefix
 
 from .pipelines import enrich_function_object, pipeline_context
@@ -190,7 +191,9 @@ def run_function(
     local = pipeline_context.is_run_local(local)
     task.metadata.labels = task.metadata.labels or labels or {}
     if pipeline_context.workflow_id:
-        task.metadata.labels["workflow"] = pipeline_context.workflow_id
+        task.metadata.labels[mlrun_constants.MLRunInternalLabels.workflow] = (
+            pipeline_context.workflow_id
+        )
     if function.kind == "local":
         command, function = mlrun.run.load_func_code(function)
         function.spec.command = command
@@ -225,9 +228,9 @@ class BuildStatus:
 class BuildStatus:
     """returned status from build operation"""
 
-    def __init__(self, ready, outputs={}, function=None):
+    def __init__(self, ready, outputs=None, function=None):
         self.ready = ready
-        self.outputs = outputs
+        self.outputs = outputs or {}
         self.function = function
 
     def after(self, step):
@@ -340,9 +343,9 @@ class DeployStatus:
 class DeployStatus:
     """returned status from deploy operation"""
 
-    def __init__(self, state, outputs={}, function=None):
+    def __init__(self, state, outputs=None, function=None):
         self.state = state
-        self.outputs = outputs
+        self.outputs = outputs or {}
         self.function = function
 
     def after(self, step):
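
The BuildStatus and DeployStatus changes fix Python's shared mutable default pitfall: a {} default is created once, at definition time, and then shared across every call. A minimal standalone illustration:

def bad(outputs={}):
    # The same dict object is reused on every call.
    outputs["hits"] = outputs.get("hits", 0) + 1
    return outputs


def good(outputs=None):
    # A fresh dict per call unless the caller supplies one.
    outputs = outputs or {}
    outputs["hits"] = outputs.get("hits", 0) + 1
    return outputs


print(bad())   # {'hits': 1}
print(bad())   # {'hits': 2} <- state leaked from the first call
print(good())  # {'hits': 1}
print(good())  # {'hits': 1}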
mlrun/projects/pipelines.py CHANGED
@@ -26,6 +26,7 @@ from kfp.compiler import compiler
 from mlrun_pipelines.helpers import new_pipe_metadata
 
 import mlrun
+import mlrun.common.runtimes.constants
 import mlrun.common.schemas
 import mlrun.utils.notifications
 from mlrun.errors import err_to_str
@@ -371,7 +372,7 @@ class _PipelineRunStatus:
         engine: type["_PipelineRunner"],
         project: "mlrun.projects.MlrunProject",
         workflow: WorkflowSpec = None,
-        state: str = "",
+        state: mlrun_pipelines.common.models.RunStatuses = "",
         exc: Exception = None,
     ):
         """
@@ -479,6 +480,7 @@ class _PipelineRunner(abc.ABC):
         timeout=None,
         expected_statuses=None,
         notifiers: mlrun.utils.notifications.CustomNotificationPusher = None,
+        **kwargs,
     ):
         pass
 
@@ -610,6 +612,7 @@ class _KFPRunner(_PipelineRunner):
         timeout=None,
         expected_statuses=None,
         notifiers: mlrun.utils.notifications.CustomNotificationPusher = None,
+        **kwargs,
     ):
         if timeout is None:
             timeout = 60 * 60
@@ -733,6 +736,7 @@ class _LocalRunner(_PipelineRunner):
         timeout=None,
         expected_statuses=None,
         notifiers: mlrun.utils.notifications.CustomNotificationPusher = None,
+        **kwargs,
     ):
         pass
 
@@ -860,7 +864,7 @@ class _RemoteRunner(_PipelineRunner):
             )
             state = mlrun_pipelines.common.models.RunStatuses.failed
         else:
-            state = mlrun_pipelines.common.models.RunStatuses.succeeded
+            state = mlrun_pipelines.common.models.RunStatuses.running
         project.notifiers.push_pipeline_start_message(
             project.metadata.name,
         )
@@ -877,24 +881,47 @@
     @staticmethod
     def get_run_status(
         project,
-        run,
+        run: _PipelineRunStatus,
         timeout=None,
         expected_statuses=None,
         notifiers: mlrun.utils.notifications.CustomNotificationPusher = None,
+        inner_engine: type[_PipelineRunner] = None,
     ):
-        # ignore notifiers, as they are handled by the remote pipeline notifications,
-        # so overriding with CustomNotificationPusher with empty list of notifiers
-        state, had_errors, text = _KFPRunner.get_run_status(
-            project,
-            run,
-            timeout,
-            expected_statuses,
-            notifiers=mlrun.utils.notifications.CustomNotificationPusher([]),
-        )
+        inner_engine = inner_engine or _KFPRunner
+        if inner_engine.engine == _KFPRunner.engine:
+            # ignore notifiers for remote notifications, as they are handled by the remote pipeline notifications,
+            # so overriding with CustomNotificationPusher with empty list of notifiers or only local notifiers
+            local_project_notifiers = list(
+                set(mlrun.utils.notifications.NotificationTypes.local()).intersection(
+                    set(project.notifiers.notifications.keys())
+                )
+            )
+            notifiers = mlrun.utils.notifications.CustomNotificationPusher(
+                local_project_notifiers
+            )
+            return _KFPRunner.get_run_status(
+                project,
+                run,
+                timeout,
+                expected_statuses,
+                notifiers=notifiers,
+            )
 
-        # indicate the pipeline status since we don't push the notifications in the remote runner
-        logger.info(text)
-        return state, had_errors, text
+        elif inner_engine.engine == _LocalRunner.engine:
+            mldb = mlrun.db.get_run_db(secrets=project._secrets)
+            pipeline_runner_run = mldb.read_run(run.run_id, project=project.name)
+            pipeline_runner_run = mlrun.run.RunObject.from_dict(pipeline_runner_run)
+            pipeline_runner_run.logs(db=mldb)
+            pipeline_runner_run.refresh()
+            run._state = mlrun.common.runtimes.constants.RunStates.run_state_to_pipeline_run_status(
+                pipeline_runner_run.status.state
+            )
+            run._exc = pipeline_runner_run.status.error
+
+        else:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                f"Unsupported inner runner engine: {inner_engine.engine}"
+            )
 
 
 def create_pipeline(project, pipeline, functions, secrets=None, handler=None):
mlrun/projects/project.py CHANGED
@@ -39,6 +39,7 @@ import yaml
 from mlrun_pipelines.models import PipelineNodeWrapper
 
 import mlrun.common.helpers
+import mlrun.common.runtimes.constants
 import mlrun.common.schemas.artifact
 import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.db
@@ -77,7 +78,10 @@ from ..utils.clones import (
     clone_zip,
     get_repo_url,
 )
-from ..utils.helpers import ensure_git_branch, resolve_git_reference_from_source
+from ..utils.helpers import (
+    ensure_git_branch,
+    resolve_git_reference_from_source,
+)
 from ..utils.notifications import CustomNotificationPusher, NotificationTypes
 from .operations import (
     BuildStatus,
@@ -1271,6 +1275,14 @@ class MlrunProject(ModelObj):
     def description(self, description):
         self.spec.description = description
 
+    @property
+    def default_function_node_selector(self) -> dict:
+        return self.spec.default_function_node_selector
+
+    @default_function_node_selector.setter
+    def default_function_node_selector(self, default_function_node_selector):
+        self.spec.default_function_node_selector = default_function_node_selector
+
     @property
     def default_image(self) -> str:
         return self.spec.default_image
@@ -2991,14 +3003,17 @@ class MlrunProject(ModelObj):
             )
         workflow_spec.clear_tmp()
         if (timeout or watch) and not workflow_spec.schedule:
+            run_status_kwargs = {}
             status_engine = run._engine
             # run's engine gets replaced with inner engine if engine is remote,
             # so in that case we need to get the status from the remote engine manually
-            # TODO: support watch for remote:local
-            if workflow_engine.engine == "remote" and status_engine.engine != "local":
+            if workflow_engine.engine == "remote":
                 status_engine = _RemoteRunner
+                run_status_kwargs["inner_engine"] = run._engine
 
-            status_engine.get_run_status(project=self, run=run, timeout=timeout)
+            status_engine.get_run_status(
+                project=self, run=run, timeout=timeout, **run_status_kwargs
+            )
         return run
 
     def save_workflow(self, name, target, artifact_path=None, ttl=None):
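
Dropping the "TODO: support watch for remote:local" and passing inner_engine through run_status_kwargs means the remote engine can now delegate status polling to a local inner engine. A hedged sketch of a caller exercising this; the project setup and workflow name are placeholders, and the exact workflow configuration depends on the project:

import mlrun

project = mlrun.get_or_create_project("my-project", context="./")

# With a remote:local workflow, watch=True now polls the pipeline-runner run
# through the inner local engine instead of being unsupported.
run = project.run(
    name="main",
    engine="remote:local",
    watch=True,
)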
@@ -3098,17 +3113,18 @@
 
     def set_model_monitoring_credentials(
         self,
-        access_key: str = None,
-        endpoint_store_connection: str = None,
-        stream_path: str = None,
+        access_key: Optional[str] = None,
+        endpoint_store_connection: Optional[str] = None,
+        stream_path: Optional[str] = None,
+        tsdb_connection: Optional[str] = None,
     ):
         """Set the credentials that will be used by the project's model monitoring
         infrastructure functions.
 
-        :param access_key: Model Monitoring access key for managing user permissions
         :param access_key: Model Monitoring access key for managing user permissions
         :param endpoint_store_connection: Endpoint store connection string
         :param stream_path: Path to the model monitoring stream
+        :param tsdb_connection: Connection string to the time series database
         """
 
         secrets_dict = {}
@@ -3131,6 +3147,16 @@ class MlrunProject(ModelObj):
                 mlrun.common.schemas.model_monitoring.ProjectSecretKeys.STREAM_PATH
             ] = stream_path
 
+        if tsdb_connection:
+            if not tsdb_connection.startswith("taosws://"):
+                raise mlrun.errors.MLRunInvalidArgumentError(
+                    "Currently only TDEngine websocket connection is supported for non-v3io TSDB,"
+                    "please provide a full URL (e.g. taosws://user:password@host:port)"
+                )
+            secrets_dict[
+                mlrun.common.schemas.model_monitoring.ProjectSecretKeys.TSDB_CONNECTION
+            ] = tsdb_connection
+
         self.set_secrets(
             secrets=secrets_dict,
             provider=mlrun.common.schemas.SecretProviderName.kubernetes,
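
With the new tsdb_connection parameter, a project can point model monitoring at a TDEngine time series database; only taosws:// websocket URLs are accepted for now. A usage sketch, with placeholder project name and connection details:

import mlrun

project = mlrun.get_or_create_project("my-project", context="./")
project.set_model_monitoring_credentials(
    tsdb_connection="taosws://user:password@tdengine-host:6041",
)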
@@ -3689,7 +3715,10 @@
     name: Optional[str] = None,
     uid: Optional[Union[str, list[str]]] = None,
     labels: Optional[Union[str, list[str]]] = None,
-    state: Optional[str] = None,
+    state: Optional[
+        mlrun.common.runtimes.constants.RunStates
+    ] = None,  # Backward compatibility
+    states: typing.Optional[list[mlrun.common.runtimes.constants.RunStates]] = None,
     sort: bool = True,
     last: int = 0,
     iter: bool = False,
@@ -3723,10 +3752,11 @@
     :param labels: A list of labels to filter by. Label filters work by either filtering a specific value
                    of a label (i.e. list("key=value")) or by looking for the existence of a given
                    key (i.e. "key").
-    :param state: List only runs whose state is specified.
+    :param state: Deprecated - List only runs whose state is specified.
+    :param states: List only runs whose state is one of the provided states.
     :param sort: Whether to sort the result according to their start time. Otherwise, results will be
                  returned by their internal order in the DB (order will not be guaranteed).
-    :param last: Deprecated - currently not used (will be removed in 1.8.0).
+    :param last: Deprecated - currently not used (will be removed in 1.9.0).
     :param iter: If ``True`` return runs from all iterations. Otherwise, return only runs whose ``iter`` is 0.
     :param start_time_from: Filter by run start time in ``[start_time_from, start_time_to]``.
    :param start_time_to: Filter by run start time in ``[start_time_from, start_time_to]``.
@@ -3734,13 +3764,22 @@
                                  last_update_time_to)``.
     :param last_update_time_to: Filter by run last update time in ``(last_update_time_from, last_update_time_to)``.
     """
+    if state:
+        # TODO: Remove this in 1.9.0
+        warnings.warn(
+            "'state' is deprecated and will be removed in 1.9.0. Use 'states' instead.",
+            FutureWarning,
+        )
+
     db = mlrun.db.get_run_db(secrets=self._secrets)
     return db.list_runs(
         name,
         uid,
         self.metadata.name,
         labels=labels,
-        state=state,
+        states=mlrun.utils.helpers.as_list(state)
+        if state is not None
+        else states or None,
         sort=sort,
         last=last,
         iter=iter,
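
Callers filtering by run state should migrate from the deprecated singular state to states, which accepts a list; passing state still works but now emits a FutureWarning. A sketch, given an existing MlrunProject instance named project:

# Deprecated: emits a FutureWarning and is converted to states=["error"].
runs = project.list_runs(state="error")

# Preferred: filter on one or more states directly.
runs = project.list_runs(states=["error", "aborted"])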
@@ -3992,8 +4031,8 @@
         self,
         action: Callable,
         remote: str,
-        args: list = [],
-        kwargs: dict = {},
+        args: list = None,
+        kwargs: dict = None,
         secrets: Union[SecretsStore, dict] = None,
     ):
         """Run an arbitrary Git routine while the remote is enriched with secrets
@@ -4013,6 +4052,8 @@
         try:
             if is_remote_enriched:
                 self.spec.repo.remotes[remote].set_url(enriched_remote, clean_remote)
+            args = args or []
+            kwargs = kwargs or {}
             action(*args, **kwargs)
         except RuntimeError as e:
             raise mlrun.errors.MLRunRuntimeError(
mlrun/render.py CHANGED
@@ -126,7 +126,7 @@ def artifacts_html(
 
         if not attribute_value:
             mlrun.utils.logger.warning(
-                "Artifact is incomplete, omitting from output (most likely due to a failed artifact logging)",
+                f"Artifact required attribute {attribute_name} is missing, omitting from output",
                 artifact_key=key,
             )
             continue
@@ -400,14 +400,17 @@
     else:
         df["labels"] = df["labels"].apply(dict_html)
         df["inputs"] = df["inputs"].apply(inputs_html)
-        if df["artifact_uris"][0]:
-            df["artifact_uris"] = df["artifact_uris"].apply(dict_html)
-            df.drop("artifacts", axis=1, inplace=True)
-        else:
+        if df["artifacts"][0]:
             df["artifacts"] = df["artifacts"].apply(
                 lambda artifacts: artifacts_html(artifacts, "target_path"),
             )
             df.drop("artifact_uris", axis=1, inplace=True)
+        elif df["artifact_uris"][0]:
+            df["artifact_uris"] = df["artifact_uris"].apply(dict_html)
+            df.drop("artifacts", axis=1, inplace=True)
+        else:
+            df.drop("artifacts", axis=1, inplace=True)
+            df.drop("artifact_uris", axis=1, inplace=True)
 
     def expand_error(x):
         if x["state"] == "error":
mlrun/runtimes/base.py CHANGED
@@ -25,6 +25,7 @@ from mlrun_pipelines.common.ops import mlrun_op
 from nuclio.build import mlrun_footer
 
 import mlrun.common.constants
+import mlrun.common.constants as mlrun_constants
 import mlrun.common.schemas
 import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.db
@@ -473,7 +474,7 @@ class BaseRuntime(ModelObj):
             )
         if runspec.spec.output_path:
             runspec.spec.output_path = runspec.spec.output_path.replace(
-                "{{run.user}}", meta.labels["owner"]
+                "{{run.user}}", meta.labels[mlrun_constants.MLRunInternalLabels.owner]
             )
 
         if db and self.kind != "handler":
mlrun/runtimes/databricks_job/databricks_wrapper.py CHANGED
@@ -99,7 +99,7 @@ def save_credentials(
     credentials["DATABRICKS_CLUSTER_ID"] = cluster_id
 
     with open(credentials_path, "w") as yaml_file:
-        yaml.dump(credentials, yaml_file, default_flow_style=False)
+        yaml.safe_dump(credentials, yaml_file, default_flow_style=False)
 
 
 def run_mlrun_databricks_job(
mlrun/runtimes/local.py CHANGED
@@ -33,6 +33,7 @@ from sys import executable
 from nuclio import Event
 
 import mlrun
+import mlrun.common.constants as mlrun_constants
 from mlrun.lists import RunList
 
 from ..errors import err_to_str
@@ -257,7 +258,8 @@ class LocalRuntime(BaseRuntime, ParallelRunner):
             set_paths(os.path.realpath("."))
 
         if (
-            runobj.metadata.labels.get("kind") == RemoteSparkRuntime.kind
+            runobj.metadata.labels.get(mlrun_constants.MLRunInternalLabels.kind)
+            == RemoteSparkRuntime.kind
             and environ["MLRUN_SPARK_CLIENT_IGZ_SPARK"] == "true"
         ):
             from mlrun.runtimes.remotesparkjob import igz_spark_pre_hook
@@ -382,6 +384,7 @@ def load_module(file_name, handler, context):
     if spec is None:
         raise RunError(f"Cannot import from {file_name!r}")
     module = imputil.module_from_spec(spec)
+    sys.modules[mod_name] = module
     spec.loader.exec_module(module)
 
     class_args = {}
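
Registering the freshly created module in sys.modules before exec_module follows the standard importlib recipe; without it, code in the loaded file that looks itself up in sys.modules (as dataclasses, pickle, or a self-import does) fails. A minimal standalone sketch of the pattern:

import importlib.util
import sys


def load_module_from_file(file_name: str, mod_name: str):
    spec = importlib.util.spec_from_file_location(mod_name, file_name)
    if spec is None:
        raise ImportError(f"Cannot import from {file_name!r}")
    module = importlib.util.module_from_spec(spec)
    # Register before executing so the module can reference itself.
    sys.modules[mod_name] = module
    spec.loader.exec_module(module)
    return module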
mlrun/runtimes/nuclio/api_gateway.py CHANGED
@@ -21,18 +21,17 @@ from nuclio.auth import AuthInfo as NuclioAuthInfo
 from nuclio.auth import AuthKinds as NuclioAuthKinds
 
 import mlrun
+import mlrun.common.constants as mlrun_constants
 import mlrun.common.schemas as schemas
 import mlrun.common.types
 from mlrun.model import ModelObj
 from mlrun.platforms.iguazio import min_iguazio_versions
 from mlrun.utils import logger
 
-from .function import get_fullname, min_nuclio_versions
+from .function import min_nuclio_versions
 
-PROJECT_NAME_LABEL = "nuclio.io/project-name"
 
-
-class APIGatewayAuthenticator(typing.Protocol):
+class Authenticator(typing.Protocol):
     @property
     def authentication_mode(self) -> str:
         return schemas.APIGatewayAuthenticationMode.none.value
@@ -64,6 +63,10 @@ class APIGatewayAuthenticator(typing.Protocol):
         return None
 
 
+class APIGatewayAuthenticator(Authenticator, ModelObj):
+    _dict_fields = ["authentication_mode"]
+
+
 class NoneAuth(APIGatewayAuthenticator):
     """
     An API gateway authenticator with no authentication.
@@ -284,7 +287,21 @@ class APIGatewaySpec(ModelObj):
         function_names = []
         for func in functions:
             if isinstance(func, str):
-                function_names.append(func)
+                # check whether the function was passed as a URI or just a name
+                parsed_project, function_name, _, _ = (
+                    mlrun.common.helpers.parse_versioned_object_uri(func)
+                )
+
+                if parsed_project and function_name:
+                    # check that parsed project and passed project are the same
+                    if parsed_project != project:
+                        raise mlrun.errors.MLRunInvalidArgumentError(
+                            "Function doesn't belong to passed project"
+                        )
+                    function_uri = func
+                else:
+                    function_uri = mlrun.utils.generate_object_uri(project, func)
+                function_names.append(function_uri)
                 continue
 
             function_name = (
@@ -299,8 +316,13 @@
                     f"input function {function_name} "
                    f"does not belong to this project"
                 )
-            nuclio_name = get_fullname(function_name, project, func.metadata.tag)
-            function_names.append(nuclio_name)
+            function_uri = mlrun.utils.generate_object_uri(
+                project,
+                function_name,
+                func.metadata.tag,
+                func.metadata.hash,
+            )
+            function_names.append(function_uri)
         return function_names
 
 
@@ -526,7 +548,9 @@
 
     @classmethod
     def from_scheme(cls, api_gateway: schemas.APIGateway):
-        project = api_gateway.metadata.labels.get(PROJECT_NAME_LABEL)
+        project = api_gateway.metadata.labels.get(
+            mlrun_constants.MLRunInternalLabels.nuclio_project_name
+        )
         functions, canary = cls._resolve_canary(api_gateway.spec.upstreams)
         state = (
             api_gateway.status.state
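
The API gateway changes normalize every function reference to a full MLRun object URI rather than a bare or Nuclio-style name, using the generate_object_uri helper that the diff itself calls. Assuming the helper's usual project/name:tag format, the two accepted spellings resolve like this (names are placeholders):

from mlrun.utils import generate_object_uri

# A bare function name is expanded with the gateway's project...
print(generate_object_uri("my-project", "my-function"))
# my-project/my-function

# ...while tags are kept as qualifiers.
print(generate_object_uri("my-project", "my-function", tag="latest"))
# my-project/my-function:latest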