mlrun 1.7.0rc14__py3-none-any.whl → 1.7.0rc21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mlrun might be problematic.

Files changed (152)
  1. mlrun/__init__.py +10 -1
  2. mlrun/__main__.py +23 -111
  3. mlrun/alerts/__init__.py +15 -0
  4. mlrun/alerts/alert.py +144 -0
  5. mlrun/api/schemas/__init__.py +4 -3
  6. mlrun/artifacts/__init__.py +8 -3
  7. mlrun/artifacts/base.py +36 -253
  8. mlrun/artifacts/dataset.py +9 -190
  9. mlrun/artifacts/manager.py +46 -42
  10. mlrun/artifacts/model.py +9 -141
  11. mlrun/artifacts/plots.py +14 -375
  12. mlrun/common/constants.py +65 -3
  13. mlrun/common/formatters/__init__.py +19 -0
  14. mlrun/{runtimes/mpijob/v1alpha1.py → common/formatters/artifact.py} +6 -14
  15. mlrun/common/formatters/base.py +113 -0
  16. mlrun/common/formatters/function.py +46 -0
  17. mlrun/common/formatters/pipeline.py +53 -0
  18. mlrun/common/formatters/project.py +51 -0
  19. mlrun/{runtimes → common/runtimes}/constants.py +32 -4
  20. mlrun/common/schemas/__init__.py +10 -5
  21. mlrun/common/schemas/alert.py +92 -11
  22. mlrun/common/schemas/api_gateway.py +56 -0
  23. mlrun/common/schemas/artifact.py +15 -5
  24. mlrun/common/schemas/auth.py +2 -0
  25. mlrun/common/schemas/client_spec.py +1 -0
  26. mlrun/common/schemas/frontend_spec.py +1 -0
  27. mlrun/common/schemas/function.py +4 -0
  28. mlrun/common/schemas/model_monitoring/__init__.py +15 -3
  29. mlrun/common/schemas/model_monitoring/constants.py +58 -7
  30. mlrun/common/schemas/model_monitoring/grafana.py +9 -5
  31. mlrun/common/schemas/model_monitoring/model_endpoints.py +86 -2
  32. mlrun/common/schemas/pipeline.py +0 -9
  33. mlrun/common/schemas/project.py +5 -11
  34. mlrun/common/types.py +1 -0
  35. mlrun/config.py +27 -9
  36. mlrun/data_types/to_pandas.py +9 -9
  37. mlrun/datastore/base.py +41 -9
  38. mlrun/datastore/datastore.py +6 -2
  39. mlrun/datastore/datastore_profile.py +56 -4
  40. mlrun/datastore/inmem.py +2 -2
  41. mlrun/datastore/redis.py +2 -2
  42. mlrun/datastore/s3.py +5 -0
  43. mlrun/datastore/sources.py +147 -7
  44. mlrun/datastore/store_resources.py +7 -7
  45. mlrun/datastore/targets.py +110 -42
  46. mlrun/datastore/utils.py +42 -0
  47. mlrun/db/base.py +54 -10
  48. mlrun/db/httpdb.py +282 -79
  49. mlrun/db/nopdb.py +52 -10
  50. mlrun/errors.py +11 -0
  51. mlrun/execution.py +24 -9
  52. mlrun/feature_store/__init__.py +0 -2
  53. mlrun/feature_store/api.py +12 -47
  54. mlrun/feature_store/feature_set.py +9 -0
  55. mlrun/feature_store/feature_vector.py +8 -0
  56. mlrun/feature_store/ingestion.py +7 -6
  57. mlrun/feature_store/retrieval/base.py +9 -4
  58. mlrun/feature_store/retrieval/conversion.py +9 -9
  59. mlrun/feature_store/retrieval/dask_merger.py +2 -0
  60. mlrun/feature_store/retrieval/job.py +9 -3
  61. mlrun/feature_store/retrieval/local_merger.py +2 -0
  62. mlrun/feature_store/retrieval/spark_merger.py +16 -0
  63. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +7 -12
  64. mlrun/frameworks/parallel_coordinates.py +2 -1
  65. mlrun/frameworks/tf_keras/__init__.py +4 -1
  66. mlrun/k8s_utils.py +10 -11
  67. mlrun/launcher/base.py +4 -3
  68. mlrun/launcher/client.py +5 -3
  69. mlrun/launcher/local.py +8 -2
  70. mlrun/launcher/remote.py +8 -2
  71. mlrun/lists.py +6 -2
  72. mlrun/model.py +45 -21
  73. mlrun/model_monitoring/__init__.py +1 -1
  74. mlrun/model_monitoring/api.py +41 -18
  75. mlrun/model_monitoring/application.py +5 -305
  76. mlrun/model_monitoring/applications/__init__.py +11 -0
  77. mlrun/model_monitoring/applications/_application_steps.py +157 -0
  78. mlrun/model_monitoring/applications/base.py +280 -0
  79. mlrun/model_monitoring/applications/context.py +214 -0
  80. mlrun/model_monitoring/applications/evidently_base.py +211 -0
  81. mlrun/model_monitoring/applications/histogram_data_drift.py +132 -91
  82. mlrun/model_monitoring/applications/results.py +99 -0
  83. mlrun/model_monitoring/controller.py +3 -1
  84. mlrun/model_monitoring/db/__init__.py +2 -0
  85. mlrun/model_monitoring/db/stores/__init__.py +0 -2
  86. mlrun/model_monitoring/db/stores/base/store.py +22 -37
  87. mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +43 -21
  88. mlrun/model_monitoring/db/stores/sqldb/models/base.py +39 -8
  89. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +27 -7
  90. mlrun/model_monitoring/db/stores/sqldb/models/sqlite.py +5 -0
  91. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +246 -224
  92. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +232 -216
  93. mlrun/model_monitoring/db/tsdb/__init__.py +100 -0
  94. mlrun/model_monitoring/db/tsdb/base.py +329 -0
  95. mlrun/model_monitoring/db/tsdb/helpers.py +30 -0
  96. mlrun/model_monitoring/db/tsdb/tdengine/__init__.py +15 -0
  97. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +240 -0
  98. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +45 -0
  99. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +397 -0
  100. mlrun/model_monitoring/db/tsdb/v3io/__init__.py +15 -0
  101. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +117 -0
  102. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +636 -0
  103. mlrun/model_monitoring/evidently_application.py +6 -118
  104. mlrun/model_monitoring/helpers.py +46 -1
  105. mlrun/model_monitoring/model_endpoint.py +3 -2
  106. mlrun/model_monitoring/stream_processing.py +57 -216
  107. mlrun/model_monitoring/writer.py +134 -124
  108. mlrun/package/utils/_formatter.py +2 -2
  109. mlrun/platforms/__init__.py +10 -9
  110. mlrun/platforms/iguazio.py +21 -202
  111. mlrun/projects/operations.py +19 -12
  112. mlrun/projects/pipelines.py +79 -102
  113. mlrun/projects/project.py +265 -103
  114. mlrun/render.py +15 -14
  115. mlrun/run.py +16 -46
  116. mlrun/runtimes/__init__.py +6 -3
  117. mlrun/runtimes/base.py +8 -7
  118. mlrun/runtimes/databricks_job/databricks_wrapper.py +1 -1
  119. mlrun/runtimes/funcdoc.py +0 -28
  120. mlrun/runtimes/kubejob.py +2 -1
  121. mlrun/runtimes/local.py +5 -2
  122. mlrun/runtimes/mpijob/__init__.py +0 -20
  123. mlrun/runtimes/mpijob/v1.py +1 -1
  124. mlrun/runtimes/nuclio/api_gateway.py +194 -84
  125. mlrun/runtimes/nuclio/application/application.py +170 -8
  126. mlrun/runtimes/nuclio/function.py +39 -49
  127. mlrun/runtimes/pod.py +16 -36
  128. mlrun/runtimes/remotesparkjob.py +9 -3
  129. mlrun/runtimes/sparkjob/spark3job.py +1 -1
  130. mlrun/runtimes/utils.py +6 -45
  131. mlrun/serving/server.py +2 -1
  132. mlrun/serving/v2_serving.py +5 -1
  133. mlrun/track/tracker.py +2 -1
  134. mlrun/utils/async_http.py +25 -5
  135. mlrun/utils/helpers.py +107 -75
  136. mlrun/utils/logger.py +39 -7
  137. mlrun/utils/notifications/notification/__init__.py +14 -9
  138. mlrun/utils/notifications/notification/base.py +1 -1
  139. mlrun/utils/notifications/notification/slack.py +34 -7
  140. mlrun/utils/notifications/notification/webhook.py +1 -1
  141. mlrun/utils/notifications/notification_pusher.py +147 -16
  142. mlrun/utils/regex.py +9 -0
  143. mlrun/utils/v3io_clients.py +0 -1
  144. mlrun/utils/version/version.json +2 -2
  145. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc21.dist-info}/METADATA +14 -6
  146. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc21.dist-info}/RECORD +150 -130
  147. mlrun/kfpops.py +0 -865
  148. mlrun/platforms/other.py +0 -305
  149. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc21.dist-info}/LICENSE +0 -0
  150. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc21.dist-info}/WHEEL +0 -0
  151. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc21.dist-info}/entry_points.txt +0 -0
  152. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc21.dist-info}/top_level.txt +0 -0
mlrun/runtimes/utils.py CHANGED
@@ -20,17 +20,17 @@ from io import StringIO
 from sys import stderr
 
 import pandas as pd
-from kubernetes import client
 
 import mlrun
 import mlrun.common.constants
+import mlrun.common.constants as mlrun_constants
 import mlrun.common.schemas
 import mlrun.utils.regex
 from mlrun.artifacts import TableArtifact
+from mlrun.common.runtimes.constants import RunLabels
 from mlrun.config import config
 from mlrun.errors import err_to_str
 from mlrun.frameworks.parallel_coordinates import gen_pcp_plot
-from mlrun.runtimes.constants import RunLabels
 from mlrun.runtimes.generators import selector
 from mlrun.utils import get_in, helpers, logger, verify_field_regex
 
@@ -39,9 +39,6 @@ class RunError(Exception):
     pass
 
 
-mlrun_key = "mlrun/"
-
-
 class _ContextStore:
     def __init__(self):
         self._context = None
@@ -280,43 +277,6 @@ def get_item_name(item, attr="name"):
     return getattr(item, attr, None)
 
 
-def apply_kfp(modify, cop, runtime):
-    modify(cop)
-
-    # Have to do it here to avoid circular dependencies
-    from .pod import AutoMountType
-
-    if AutoMountType.is_auto_modifier(modify):
-        runtime.spec.disable_auto_mount = True
-
-    api = client.ApiClient()
-    for k, v in cop.pod_labels.items():
-        runtime.metadata.labels[k] = v
-    for k, v in cop.pod_annotations.items():
-        runtime.metadata.annotations[k] = v
-    if cop.container.env:
-        env_names = [
-            e.name if hasattr(e, "name") else e["name"] for e in runtime.spec.env
-        ]
-        for e in api.sanitize_for_serialization(cop.container.env):
-            name = e["name"]
-            if name in env_names:
-                runtime.spec.env[env_names.index(name)] = e
-            else:
-                runtime.spec.env.append(e)
-                env_names.append(name)
-        cop.container.env.clear()
-
-    if cop.volumes and cop.container.volume_mounts:
-        vols = api.sanitize_for_serialization(cop.volumes)
-        mounts = api.sanitize_for_serialization(cop.container.volume_mounts)
-        runtime.spec.update_vols_and_mounts(vols, mounts)
-        cop.volumes.clear()
-        cop.container.volume_mounts.clear()
-
-    return runtime
-
-
 def verify_limits(
     resources_field_name,
     mem=None,
@@ -410,10 +370,10 @@ def generate_resources(mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu")
 
 
 def get_func_selector(project, name=None, tag=None):
-    s = [f"{mlrun_key}project={project}"]
+    s = [f"{mlrun_constants.MLRunInternalLabels.project}={project}"]
     if name:
-        s.append(f"{mlrun_key}function={name}")
-        s.append(f"{mlrun_key}tag={tag or 'latest'}")
+        s.append(f"{mlrun_constants.MLRunInternalLabels.function}={name}")
+        s.append(f"{mlrun_constants.MLRunInternalLabels.tag}={tag or 'latest'}")
     return s
 
 
@@ -476,6 +436,7 @@ def enrich_run_labels(
 ):
     labels_enrichment = {
         RunLabels.owner: os.environ.get("V3IO_USERNAME") or getpass.getuser(),
+        # TODO: remove this in 1.9.0
         RunLabels.v3io_user: os.environ.get("V3IO_USERNAME"),
     }
     labels_to_enrich = labels_to_enrich or RunLabels.all()
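Note on the hunks above: the hardcoded `mlrun_key = "mlrun/"` prefix is gone and selectors are now built from `MLRunInternalLabels` constants. A rough sketch of the resulting selector strings, assuming the constants keep the previous `mlrun/`-prefixed label keys (the constant values are not shown in this diff):

    # Hypothetical stand-in for mlrun.common.constants.MLRunInternalLabels;
    # the attribute values here are assumed, not taken from this diff.
    class MLRunInternalLabels:
        project = "mlrun/project"
        function = "mlrun/function"
        tag = "mlrun/tag"


    def get_func_selector(project, name=None, tag=None):
        # same shape as the new implementation above
        s = [f"{MLRunInternalLabels.project}={project}"]
        if name:
            s.append(f"{MLRunInternalLabels.function}={name}")
            s.append(f"{MLRunInternalLabels.tag}={tag or 'latest'}")
        return s


    print(get_func_selector("iris", "trainer"))
    # ['mlrun/project=iris', 'mlrun/function=trainer', 'mlrun/tag=latest']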
mlrun/serving/server.py CHANGED
@@ -387,7 +387,7 @@ def v2_serving_handler(context, event, get_body=False):
 
 
 def create_graph_server(
-    parameters={},
+    parameters=None,
     load_mode=None,
     graph=None,
     verbose=False,
@@ -403,6 +403,7 @@
         server.graph.add_route("my", class_name=MyModelClass, model_path="{path}", z=100)
         print(server.test("/v2/models/my/infer", testdata))
     """
+    parameters = parameters or {}
     server = GraphServer(graph, parameters, load_mode, verbose=verbose, **kwargs)
     server.set_current_function(
         current_function or os.environ.get("SERVING_CURRENT_FUNCTION", "")
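The `create_graph_server` change above swaps a mutable default (`parameters={}`) for `parameters=None` plus `parameters = parameters or {}` in the body. A minimal standalone sketch (not MLRun code) of the pitfall this avoids:

    def create_shared(parameters={}):
        # the default dict is created once at definition time and reused across calls
        parameters["hits"] = parameters.get("hits", 0) + 1
        return parameters


    def create_isolated(parameters=None):
        parameters = parameters or {}  # same pattern as the new create_graph_server
        parameters["hits"] = parameters.get("hits", 0) + 1
        return parameters


    first, second = create_shared(), create_shared()
    print(first is second, second)   # True {'hits': 2} -> state leaks between calls
    print(create_isolated())         # {'hits': 1}
    print(create_isolated())         # {'hits': 1} -> each call gets a fresh dict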
mlrun/serving/v2_serving.py CHANGED
@@ -528,7 +528,11 @@ def _init_endpoint_record(
         return None
 
     # Generating version model value based on the model name and model version
-    if model.version:
+    if model.model_path and model.model_path.startswith("store://"):
+        # Enrich the model server with the model artifact metadata
+        model.get_model()
+        model.version = model.model_spec.tag
+        model.labels = model.model_spec.labels
         versioned_model_name = f"{model.name}:{model.version}"
     else:
         versioned_model_name = f"{model.name}:latest"
mlrun/track/tracker.py CHANGED
@@ -31,8 +31,9 @@ class Tracker(ABC):
     * Offline: Manually importing models and artifacts into an MLRun project using the `import_x` methods.
     """
 
+    @staticmethod
     @abstractmethod
-    def is_enabled(self) -> bool:
+    def is_enabled() -> bool:
         """
         Checks if tracker is enabled.
 
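With the change above, `Tracker.is_enabled` becomes an abstract staticmethod, so implementations no longer receive `self`. A minimal sketch of the new subclass shape, using a simplified stand-in for the real `Tracker` ABC and a hypothetical subclass:

    from abc import ABC, abstractmethod


    class Tracker(ABC):  # simplified stand-in; the real class defines more methods
        @staticmethod
        @abstractmethod
        def is_enabled() -> bool:
            """Checks if tracker is enabled."""


    class DummyTracker(Tracker):  # hypothetical example subclass
        @staticmethod
        def is_enabled() -> bool:
            return True


    print(DummyTracker.is_enabled())    # True - callable on the class, no instance needed
    print(DummyTracker().is_enabled())  # True - still works through an instance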
 
mlrun/utils/async_http.py CHANGED
@@ -24,7 +24,7 @@ from aiohttp_retry import ExponentialRetry, RequestParams, RetryClient, RetryOpt
 from aiohttp_retry.client import _RequestContext
 
 from mlrun.config import config
-from mlrun.errors import err_to_str
+from mlrun.errors import err_to_str, raise_for_status
 
 from .helpers import logger as mlrun_logger
 
@@ -46,12 +46,21 @@ class AsyncClientWithRetry(RetryClient):
         *args,
         **kwargs,
     ):
+        # do not retry on PUT / PATCH as they might have side effects (not truly idempotent)
+        blacklisted_methods = (
+            blacklisted_methods
+            if blacklisted_methods is not None
+            else [
+                "POST",
+                "PUT",
+                "PATCH",
+            ]
+        )
         super().__init__(
             *args,
             retry_options=ExponentialRetryOverride(
                 retry_on_exception=retry_on_exception,
-                # do not retry on PUT / PATCH as they might have side effects (not truly idempotent)
-                blacklisted_methods=blacklisted_methods or ["POST", "PUT", "PATCH"],
+                blacklisted_methods=blacklisted_methods,
                 attempts=max_retries,
                 statuses=retry_on_status_codes,
                 factor=retry_backoff_factor,
@@ -63,6 +72,12 @@ class AsyncClientWithRetry(RetryClient):
             **kwargs,
        )
 
+    def methods_blacklist_update_required(self, new_blacklist: str):
+        self._retry_options: ExponentialRetryOverride
+        return set(self._retry_options.blacklisted_methods).difference(
+            set(new_blacklist)
+        )
+
     def _make_requests(
         self,
         params_list: list[RequestParams],
@@ -173,7 +188,7 @@ class _CustomRequestContext(_RequestContext):
                 last_attempt = current_attempt == self._retry_options.attempts
                 if self._is_status_code_ok(response.status) or last_attempt:
                     if self._raise_for_status:
-                        response.raise_for_status()
+                        raise_for_status(response)
                     self._response = response
                     return response
 
@@ -275,6 +290,11 @@ class _CustomRequestContext(_RequestContext):
                 if isinstance(exc.os_error, exc_type):
                     return
         if exc.__cause__:
-            return self.verify_exception_type(exc.__cause__)
+            # If the cause exception is retriable, return, otherwise, raise the original exception
+            try:
+                self.verify_exception_type(exc.__cause__)
+            except Exception:
+                raise exc
+            return
         else:
             raise exc
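The new `methods_blacklist_update_required` helper above is a set difference: it reports which currently blacklisted methods are missing from a proposed new blacklist. A plain-Python sketch of the same logic outside the retry client:

    current_blacklist = ["POST", "PUT", "PATCH"]  # the new default shown above


    def methods_blacklist_update_required(new_blacklist):
        return set(current_blacklist).difference(set(new_blacklist))


    print(methods_blacklist_update_required(["POST", "PUT", "PATCH"]))  # set() - nothing missing
    print(methods_blacklist_update_required(["POST"]))  # {'PUT', 'PATCH'} (order may vary)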
mlrun/utils/helpers.py CHANGED
@@ -39,7 +39,7 @@ import pandas
 import semver
 import yaml
 from dateutil import parser
-from deprecated import deprecated
+from mlrun_pipelines.models import PipelineRun
 from pandas._libs.tslibs.timestamps import Timedelta, Timestamp
 from yaml.representer import RepresenterError
 
@@ -76,19 +76,6 @@ class OverwriteBuildParamsWarning(FutureWarning):
     pass
 
 
-# TODO: remove in 1.7.0
-@deprecated(
-    version="1.5.0",
-    reason="'parse_versioned_object_uri' will be removed from this file in 1.7.0, use "
-    "'mlrun.common.helpers.parse_versioned_object_uri' instead",
-    category=FutureWarning,
-)
-def parse_versioned_object_uri(uri: str, default_project: str = ""):
-    return mlrun.common.helpers.parse_versioned_object_uri(
-        uri=uri, default_project=default_project
-    )
-
-
 class StorePrefix:
     """map mlrun store objects to prefixes"""
 
@@ -119,14 +106,9 @@
 
 
 def get_artifact_target(item: dict, project=None):
-    if is_legacy_artifact(item):
-        db_key = item.get("db_key")
-        project_str = project or item.get("project")
-        tree = item.get("tree")
-    else:
-        db_key = item["spec"].get("db_key")
-        project_str = project or item["metadata"].get("project")
-        tree = item["metadata"].get("tree")
+    db_key = item["spec"].get("db_key")
+    project_str = project or item["metadata"].get("project")
+    tree = item["metadata"].get("tree")
 
     kind = item.get("kind")
     if kind in ["dataset", "model", "artifact"] and db_key:
@@ -135,11 +117,15 @@ def get_artifact_target(item: dict, project=None):
             target = f"{target}@{tree}"
         return target
 
-    return (
-        item.get("target_path")
-        if is_legacy_artifact(item)
-        else item["spec"].get("target_path")
-    )
+    return item["spec"].get("target_path")
+
+
+# TODO: left for migrations testing purposes. Remove in 1.8.0.
+def is_legacy_artifact(artifact):
+    if isinstance(artifact, dict):
+        return "metadata" not in artifact
+    else:
+        return not hasattr(artifact, "metadata")
 
 
 logger = create_logger(config.log_level, config.log_formatter, "mlrun", sys.stdout)
@@ -195,8 +181,12 @@ def verify_field_regex(
             )
             if mode == mlrun.common.schemas.RegexMatchModes.all:
                 if raise_on_failure:
+                    if len(field_name) > max_chars:
+                        field_name = field_name[:max_chars] + "...truncated"
+                    if len(field_value) > max_chars:
+                        field_value = field_value[:max_chars] + "...truncated"
                     raise mlrun.errors.MLRunInvalidArgumentError(
-                        f"Field '{field_name[:max_chars]}' is malformed. '{field_value[:max_chars]}' "
+                        f"Field '{field_name}' is malformed. '{field_value}' "
                         f"does not match required pattern: {pattern}"
                     )
                 return False
@@ -669,7 +659,7 @@ def parse_artifact_uri(uri, default_project=""):
        [3] = tag
        [4] = tree
    """
-    uri_pattern = r"^((?P<project>.*)/)?(?P<key>.*?)(\#(?P<iteration>.*?))?(:(?P<tag>.*?))?(@(?P<tree>.*))?$"
+    uri_pattern = mlrun.utils.regex.artifact_uri_pattern
     match = re.match(uri_pattern, uri)
     if not match:
         raise ValueError(
@@ -801,34 +791,6 @@ def gen_html_table(header, rows=None):
     return style + '<table class="tg">\n' + out + "</table>\n\n"
 
 
-def new_pipe_metadata(
-    artifact_path: str = None,
-    cleanup_ttl: int = None,
-    op_transformers: list[typing.Callable] = None,
-):
-    from kfp.dsl import PipelineConf
-
-    def _set_artifact_path(task):
-        from kubernetes import client as k8s_client
-
-        task.add_env_variable(
-            k8s_client.V1EnvVar(name="MLRUN_ARTIFACT_PATH", value=artifact_path)
-        )
-        return task
-
-    conf = PipelineConf()
-    cleanup_ttl = cleanup_ttl or int(config.kfp_ttl)
-
-    if cleanup_ttl:
-        conf.set_ttl_seconds_after_finished(cleanup_ttl)
-    if artifact_path:
-        conf.add_op_transformer(_set_artifact_path)
-    if op_transformers:
-        for op_transformer in op_transformers:
-            conf.add_op_transformer(op_transformer)
-    return conf
-
-
 def _convert_python_package_version_to_image_tag(version: typing.Optional[str]):
     return (
         version.replace("+", "-").replace("0.0.0-", "") if version is not None else None
@@ -1015,17 +977,27 @@ def get_ui_url(project, uid=None):
     return url
 
 
+def get_model_endpoint_url(project, model_name, model_endpoint_id):
+    url = ""
+    if mlrun.mlconf.resolve_ui_url():
+        url = f"{mlrun.mlconf.resolve_ui_url()}/{mlrun.mlconf.ui.projects_prefix}/{project}/models"
+        if model_name:
+            url += f"/model-endpoints/{model_name}/{model_endpoint_id}/overview"
+    return url
+
+
 def get_workflow_url(project, id=None):
     url = ""
     if mlrun.mlconf.resolve_ui_url():
-        url = "{}/{}/{}/jobs/monitor-workflows/workflow/{}".format(
-            mlrun.mlconf.resolve_ui_url(), mlrun.mlconf.ui.projects_prefix, project, id
+        url = (
+            f"{mlrun.mlconf.resolve_ui_url()}/{mlrun.mlconf.ui.projects_prefix}"
+            f"/{project}/jobs/monitor-workflows/workflow/{id}"
        )
     return url
 
 
 def are_strings_in_exception_chain_messages(
-    exception: Exception, strings_list=list[str]
+    exception: Exception, strings_list: list[str]
 ) -> bool:
     while exception is not None:
         if any([string in str(exception) for string in strings_list]):
@@ -1138,7 +1110,7 @@ def get_function(function, namespace):
 
 
 def get_handler_extended(
-    handler_path: str, context=None, class_args: dict = {}, namespaces=None
+    handler_path: str, context=None, class_args: dict = None, namespaces=None
 ):
     """get function handler from [class_name::]handler string
 
@@ -1148,6 +1120,7 @@
     :param namespaces: one or list of namespaces/modules to search the handler in
     :return: function handler (callable)
     """
+    class_args = class_args or {}
     if "::" not in handler_path:
         return get_function(handler_path, namespaces)
 
@@ -1224,7 +1197,7 @@ def calculate_dataframe_hash(dataframe: pandas.DataFrame):
     return hashlib.sha1(pandas.util.hash_pandas_object(dataframe).values).hexdigest()
 
 
-def template_artifact_path(artifact_path, project, run_uid="project"):
+def template_artifact_path(artifact_path, project, run_uid=None):
     """
     Replace {{run.uid}} with the run uid and {{project}} with the project name in the artifact path.
     If no run uid is provided, the word `project` will be used instead as it is assumed to be a project
@@ -1232,6 +1205,7 @@ def template_artifact_path(artifact_path, project, run_uid="project"):
     """
     if not artifact_path:
         return artifact_path
+    run_uid = run_uid or "project"
     artifact_path = artifact_path.replace("{{run.uid}}", run_uid)
     artifact_path = _fill_project_path_template(artifact_path, project)
     return artifact_path
@@ -1291,13 +1265,6 @@ def str_to_timestamp(time_str: str, now_time: Timestamp = None):
     return Timestamp(time_str)
 
 
-def is_legacy_artifact(artifact):
-    if isinstance(artifact, dict):
-        return "metadata" not in artifact
-    else:
-        return not hasattr(artifact, "metadata")
-
-
 def is_link_artifact(artifact):
     if isinstance(artifact, dict):
         return (
@@ -1307,7 +1274,7 @@ def is_link_artifact(artifact):
         return artifact.kind == mlrun.common.schemas.ArtifactCategories.link.value
 
 
-def format_run(run: dict, with_project=False) -> dict:
+def format_run(run: PipelineRun, with_project=False) -> dict:
     fields = [
         "id",
         "name",
@@ -1344,17 +1311,17 @@ def format_run(run: dict, with_project=False) -> dict:
     # pipelines are yet to populate the status or workflow has failed
     # as observed https://jira.iguazeng.com/browse/ML-5195
     # set to unknown to ensure a status is returned
-    if run["status"] is None:
-        run["status"] = inflection.titleize(mlrun.runtimes.constants.RunStates.unknown)
+    if run.get("status", None) is None:
+        run["status"] = inflection.titleize(
+            mlrun.common.runtimes.constants.RunStates.unknown
+        )
 
     return run
 
 
 def get_in_artifact(artifact: dict, key, default=None, raise_on_missing=False):
     """artifact can be dict or Artifact object"""
-    if is_legacy_artifact(artifact):
-        return artifact.get(key, default)
-    elif key == "kind":
+    if key == "kind":
         return artifact.get(key, default)
     else:
         for block in ["metadata", "spec", "status"]:
@@ -1596,3 +1563,68 @@ def get_serving_spec():
     )
     spec = json.loads(data)
     return spec
+
+
+def additional_filters_warning(additional_filters, class_name):
+    if additional_filters and any(additional_filters):
+        mlrun.utils.logger.warn(
+            f"additional_filters parameter is not supported in {class_name},"
+            f" parameter has been ignored."
+        )
+
+
+def validate_component_version_compatibility(
+    component_name: typing.Literal["iguazio", "nuclio"], *min_versions: str
+):
+    """
+    :param component_name: Name of the component to validate compatibility for.
+    :param min_versions: Valid minimum version(s) required, assuming no 2 versions has equal major and minor.
+    """
+    parsed_min_versions = [
+        semver.VersionInfo.parse(min_version) for min_version in min_versions
+    ]
+    parsed_current_version = None
+    component_current_version = None
+    try:
+        if component_name == "iguazio":
+            component_current_version = mlrun.mlconf.igz_version
+            parsed_current_version = mlrun.mlconf.get_parsed_igz_version()
+
+            if parsed_current_version:
+                # ignore pre-release and build metadata, as iguazio version always has them, and we only care about the
+                # major, minor, and patch versions
+                parsed_current_version = semver.VersionInfo.parse(
+                    f"{parsed_current_version.major}.{parsed_current_version.minor}.{parsed_current_version.patch}"
+                )
+        if component_name == "nuclio":
+            component_current_version = mlrun.mlconf.nuclio_version
+            parsed_current_version = semver.VersionInfo.parse(
+                mlrun.mlconf.nuclio_version
+            )
+        if not parsed_current_version:
+            return True
+    except ValueError:
+        # only log when version is set but invalid
+        if component_current_version:
+            logger.warning(
+                "Unable to parse current version, assuming compatibility",
+                component_name=component_name,
+                current_version=component_current_version,
+                min_versions=min_versions,
+            )
+        return True
+
+    parsed_min_versions.sort(reverse=True)
+    for parsed_min_version in parsed_min_versions:
+        if parsed_current_version < parsed_min_version:
+            return False
+    return True
+
+
+def format_alert_summary(
+    alert: mlrun.common.schemas.AlertConfig, event_data: mlrun.common.schemas.Event
+) -> str:
+    result = alert.summary.replace("{{project}}", alert.project)
+    result = result.replace("{{name}}", alert.name)
+    result = result.replace("{{entity}}", event_data.entity.ids[0])
+    return result
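The `validate_component_version_compatibility` helper added above boils down to a semver comparison against the required minimum versions; the Iguazio/Nuclio config lookups are the MLRun-specific part. A standalone sketch of just the comparison loop, mirroring the hunk as shown:

    import semver


    def is_compatible(current_version: str, *min_versions: str) -> bool:
        parsed_min_versions = sorted(
            (semver.VersionInfo.parse(v) for v in min_versions), reverse=True
        )
        current = semver.VersionInfo.parse(current_version)
        for parsed_min_version in parsed_min_versions:
            # same check as the hunk: below any required minimum -> incompatible
            if current < parsed_min_version:
                return False
        return True


    print(is_compatible("1.8.0", "1.7.0"))           # True
    print(is_compatible("3.5.4", "3.6.0", "3.5.5"))  # False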
mlrun/utils/logger.py CHANGED
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import logging
+import typing
 from enum import Enum
 from sys import stdout
 from traceback import format_exception
@@ -92,7 +93,25 @@ class HumanReadableFormatter(_BaseFormatter):
 
 class HumanReadableExtendedFormatter(HumanReadableFormatter):
     def format(self, record) -> str:
-        more = self._resolve_more(record)
+        more = ""
+        record_with = self._record_with(record)
+        if record_with:
+
+            def _format_value(val):
+                formatted_val = (
+                    val
+                    if isinstance(val, str)
+                    else str(orjson.loads(self._json_dump(val)))
+                )
+                return (
+                    formatted_val.replace("\n", "\n\t\t")
+                    if len(formatted_val) < 4096
+                    else repr(formatted_val)
+                )
+
+            more = "\n\t" + "\n\t".join(
+                [f"{key}: {_format_value(val)}" for key, val in record_with.items()]
+            )
         return (
             "> "
             f"{self.formatTime(record, self.datefmt)} "
@@ -221,14 +240,27 @@ class FormatterKinds(Enum):
     JSON = "json"
 
 
-def create_formatter_instance(formatter_kind: FormatterKinds) -> logging.Formatter:
+def resolve_formatter_by_kind(
+    formatter_kind: FormatterKinds,
+) -> type[
+    typing.Union[HumanReadableFormatter, HumanReadableExtendedFormatter, JSONFormatter]
+]:
     return {
-        FormatterKinds.HUMAN: HumanReadableFormatter(),
-        FormatterKinds.HUMAN_EXTENDED: HumanReadableExtendedFormatter(),
-        FormatterKinds.JSON: JSONFormatter(),
+        FormatterKinds.HUMAN: HumanReadableFormatter,
+        FormatterKinds.HUMAN_EXTENDED: HumanReadableExtendedFormatter,
+        FormatterKinds.JSON: JSONFormatter,
     }[formatter_kind]
 
 
+def create_test_logger(name: str = "mlrun", stream: IO[str] = stdout) -> Logger:
+    return create_logger(
+        level="debug",
+        formatter_kind=FormatterKinds.HUMAN_EXTENDED.name,
+        name=name,
+        stream=stream,
+    )
+
+
 def create_logger(
     level: Optional[str] = None,
     formatter_kind: str = FormatterKinds.HUMAN.name,
@@ -243,11 +275,11 @@
     logger_instance = Logger(level, name=name, propagate=False)
 
     # resolve formatter
-    formatter_instance = create_formatter_instance(
+    formatter_instance = resolve_formatter_by_kind(
         FormatterKinds(formatter_kind.lower())
     )
 
     # set handler
-    logger_instance.set_handler("default", stream or stdout, formatter_instance)
+    logger_instance.set_handler("default", stream or stdout, formatter_instance())
 
     return logger_instance
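The logger change above makes `resolve_formatter_by_kind` return a formatter class instead of a pre-built instance, so `create_logger` now instantiates one per handler. A small sketch of the pattern with plain `logging` stand-ins for MLRun's formatters:

    import logging


    class HumanReadableFormatter(logging.Formatter):  # stand-in for mlrun's formatter
        pass


    class JSONFormatter(logging.Formatter):  # stand-in
        pass


    def resolve_formatter_by_kind(kind: str) -> type[logging.Formatter]:
        # returns the class, as in the new helper
        return {"human": HumanReadableFormatter, "json": JSONFormatter}[kind]


    formatter_cls = resolve_formatter_by_kind("json")
    handler = logging.StreamHandler()
    handler.setFormatter(formatter_cls())  # the caller instantiates, matching create_logger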
mlrun/utils/notifications/notification/__init__.py CHANGED
@@ -51,14 +51,19 @@ class NotificationTypes(str, enum.Enum):
             self.console: [self.ipython],
         }.get(self, [])
 
+    @classmethod
+    def local(cls) -> list[str]:
+        return [
+            cls.console,
+            cls.ipython,
+        ]
+
     @classmethod
     def all(cls) -> list[str]:
-        return list(
-            [
-                cls.console,
-                cls.git,
-                cls.ipython,
-                cls.slack,
-                cls.webhook,
-            ]
-        )
+        return [
+            cls.console,
+            cls.git,
+            cls.ipython,
+            cls.slack,
+            cls.webhook,
+        ]
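The hunk above adds a `local()` classmethod alongside `all()` on `NotificationTypes`, returning what look like the local, in-process notification kinds. A self-contained sketch mirroring the enum (the member values are assumed; only the member names appear in this diff):

    import enum


    class NotificationTypes(str, enum.Enum):
        console = "console"
        git = "git"
        ipython = "ipython"
        slack = "slack"
        webhook = "webhook"

        @classmethod
        def local(cls) -> list[str]:
            return [cls.console, cls.ipython]

        @classmethod
        def all(cls) -> list[str]:
            return [cls.console, cls.git, cls.ipython, cls.slack, cls.webhook]


    print(NotificationTypes.local())     # [<NotificationTypes.console: 'console'>, <NotificationTypes.ipython: 'ipython'>]
    print(len(NotificationTypes.all()))  # 5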
mlrun/utils/notifications/notification/base.py CHANGED
@@ -77,7 +77,7 @@ class NotificationBase:
             return f"[{severity}] {message}"
         return (
             f"[{severity}] {message} for project {alert.project} "
-            f"UID {event_data.entity.id}. Value {event_data.value}"
+            f"UID {event_data.entity.ids[0]}. Values {event_data.value_dict}"
         )
 
         if not runs: