mlrun 1.7.0rc33__py3-none-any.whl → 1.7.0rc35__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (44)
  1. mlrun/artifacts/base.py +1 -0
  2. mlrun/common/schemas/__init__.py +1 -1
  3. mlrun/common/schemas/common.py +3 -0
  4. mlrun/common/schemas/function.py +7 -0
  5. mlrun/common/schemas/model_monitoring/__init__.py +1 -2
  6. mlrun/common/schemas/model_monitoring/constants.py +3 -16
  7. mlrun/common/schemas/notification.py +1 -1
  8. mlrun/common/schemas/project.py +35 -3
  9. mlrun/common/types.py +1 -0
  10. mlrun/config.py +6 -7
  11. mlrun/datastore/sources.py +8 -4
  12. mlrun/db/base.py +7 -5
  13. mlrun/db/httpdb.py +10 -8
  14. mlrun/execution.py +1 -3
  15. mlrun/model.py +143 -23
  16. mlrun/model_monitoring/applications/context.py +13 -15
  17. mlrun/model_monitoring/applications/evidently_base.py +4 -5
  18. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +5 -0
  19. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +2 -2
  20. mlrun/model_monitoring/db/tsdb/base.py +6 -3
  21. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +0 -3
  22. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +22 -3
  23. mlrun/model_monitoring/stream_processing.py +5 -153
  24. mlrun/projects/pipelines.py +76 -73
  25. mlrun/projects/project.py +7 -1
  26. mlrun/run.py +26 -9
  27. mlrun/runtimes/nuclio/api_gateway.py +22 -6
  28. mlrun/runtimes/nuclio/application/application.py +62 -11
  29. mlrun/runtimes/nuclio/function.py +8 -0
  30. mlrun/runtimes/nuclio/serving.py +6 -6
  31. mlrun/runtimes/pod.py +2 -4
  32. mlrun/serving/server.py +12 -7
  33. mlrun/serving/states.py +16 -2
  34. mlrun/utils/db.py +3 -0
  35. mlrun/utils/helpers.py +30 -19
  36. mlrun/utils/notifications/notification/webhook.py +8 -1
  37. mlrun/utils/version/version.json +2 -2
  38. {mlrun-1.7.0rc33.dist-info → mlrun-1.7.0rc35.dist-info}/METADATA +4 -2
  39. {mlrun-1.7.0rc33.dist-info → mlrun-1.7.0rc35.dist-info}/RECORD +43 -44
  40. {mlrun-1.7.0rc33.dist-info → mlrun-1.7.0rc35.dist-info}/WHEEL +1 -1
  41. mlrun/model_monitoring/prometheus.py +0 -216
  42. {mlrun-1.7.0rc33.dist-info → mlrun-1.7.0rc35.dist-info}/LICENSE +0 -0
  43. {mlrun-1.7.0rc33.dist-info → mlrun-1.7.0rc35.dist-info}/entry_points.txt +0 -0
  44. {mlrun-1.7.0rc33.dist-info → mlrun-1.7.0rc35.dist-info}/top_level.txt +0 -0
mlrun/run.py CHANGED
@@ -21,6 +21,7 @@ import tempfile
  import time
  import typing
  import uuid
+ import warnings
  from base64 import b64decode
  from copy import deepcopy
  from os import environ, makedirs, path
@@ -196,18 +197,19 @@ def load_func_code(command="", workdir=None, secrets=None, name="name"):
  def get_or_create_ctx(
      name: str,
      event=None,
-     spec=None,
+     spec: Optional[dict] = None,
      with_env: bool = True,
      rundb: str = "",
      project: str = "",
-     upload_artifacts=False,
+     upload_artifacts: bool = False,
      labels: Optional[dict] = None,
  ) -> MLClientCtx:
-     """called from within the user program to obtain a run context
+     """
+     Called from within the user program to obtain a run context.

-     the run context is an interface for receiving parameters, data and logging
+     The run context is an interface for receiving parameters, data and logging
      run results, the run context is read from the event, spec, or environment
-     (in that order), user can also work without a context (local defaults mode)
+     (in that order), user can also work without a context (local defaults mode).

      all results are automatically stored in the "rundb" or artifact store,
      the path to the rundb can be specified in the call or obtained from env.
@@ -220,7 +222,7 @@ def get_or_create_ctx(
      :param project: project to initiate the context in (by default `mlrun.mlconf.default_project`)
      :param upload_artifacts: when using local context (not as part of a job/run), upload artifacts to the
                               system default artifact path location
-     :param labels: dict of the context labels
+     :param labels: (deprecated - use spec instead) dict of the context labels.
      :return: execution context

      Examples::
@@ -253,6 +255,20 @@ def get_or_create_ctx(
          context.log_artifact("results.html", body=b"<b> Some HTML <b>", viewer="web-app")

      """
+     if labels:
+         warnings.warn(
+             "The `labels` argument is deprecated and will be removed in 1.9.0. "
+             "Please use `spec` instead, e.g.:\n"
+             "spec={'metadata': {'labels': {'key': 'value'}}}",
+             FutureWarning,
+         )
+         if spec is None:
+             spec = {}
+         if "metadata" not in spec:
+             spec["metadata"] = {}
+         if "labels" not in spec["metadata"]:
+             spec["metadata"]["labels"] = {}
+         spec["metadata"]["labels"].update(labels)

      if global_context.get() and not spec and not event:
          return global_context.get()
@@ -306,9 +322,6 @@ def get_or_create_ctx(
      ctx = MLClientCtx.from_dict(
          newspec, rundb=out, autocommit=autocommit, tmp=tmp, host=socket.gethostname()
      )
-     labels = labels or {}
-     for key, val in labels.items():
-         ctx.set_label(key=key, value=val)
      global_context.set(ctx)
      return ctx

@@ -778,6 +791,10 @@ def code_to_function(
          raise ValueError("code_output option is only used with notebooks")

      if is_nuclio:
+         mlrun.utils.helpers.validate_single_def_handler(
+             function_kind=sub_kind, code=code
+         )
+
          runtime = RuntimeKinds.resolve_nuclio_runtime(kind, sub_kind)
          # default_handler is only used in :mlrun sub kind, determine the handler to invoke in function.run()
          runtime.spec.default_handler = handler if sub_kind == "mlrun" else ""
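
The `labels` deprecation above changes how context labels are passed. A minimal migration sketch (the run name and label values are illustrative):

    import mlrun

    # deprecated as of this release (emits FutureWarning, removal planned for 1.9.0):
    # ctx = mlrun.get_or_create_ctx("my-run", labels={"team": "ml"})

    # equivalent spec-based form going forward:
    ctx = mlrun.get_or_create_ctx(
        "my-run",
        spec={"metadata": {"labels": {"team": "ml"}}},
    )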
mlrun/runtimes/nuclio/api_gateway.py CHANGED
@@ -326,6 +326,11 @@ class APIGatewaySpec(ModelObj):
          return function_names


+ class APIGatewayStatus(ModelObj):
+     def __init__(self, state: Optional[schemas.APIGatewayState] = None):
+         self.state = state or schemas.APIGatewayState.none
+
+
  class APIGateway(ModelObj):
      _dict_fields = [
          "metadata",
@@ -338,16 +343,18 @@ class APIGateway(ModelObj):
          self,
          metadata: APIGatewayMetadata,
          spec: APIGatewaySpec,
+         status: Optional[APIGatewayStatus] = None,
      ):
          """
          Initialize the APIGateway instance.

          :param metadata: (APIGatewayMetadata) The metadata of the API gateway.
          :param spec: (APIGatewaySpec) The spec of the API gateway.
+         :param status: (APIGatewayStatus) The status of the API gateway.
          """
          self.metadata = metadata
          self.spec = spec
-         self.state = ""
+         self.status = status

      @property
      def metadata(self) -> APIGatewayMetadata:
@@ -365,6 +372,14 @@ class APIGateway(ModelObj):
      def spec(self, spec):
          self._spec = self._verify_dict(spec, "spec", APIGatewaySpec)

+     @property
+     def status(self) -> APIGatewayStatus:
+         return self._status
+
+     @status.setter
+     def status(self, status):
+         self._status = self._verify_dict(status, "status", APIGatewayStatus)
+
      def invoke(
          self,
          method="POST",
@@ -394,7 +409,7 @@ class APIGateway(ModelObj):
          )
          if not self.is_ready():
              raise mlrun.errors.MLRunPreconditionFailedError(
-                 f"API gateway is not ready. " f"Current state: {self.state}"
+                 f"API gateway is not ready. " f"Current state: {self.status.state}"
              )

          auth = None
@@ -459,10 +474,10 @@ class APIGateway(ModelObj):
          )

      def is_ready(self):
-         if self.state is not schemas.api_gateway.APIGatewayState.ready:
+         if self.status.state is not schemas.api_gateway.APIGatewayState.ready:
              # try to sync the state
              self.sync()
-         return self.state == schemas.api_gateway.APIGatewayState.ready
+         return self.status.state == schemas.api_gateway.APIGatewayState.ready

      def sync(self):
          """
@@ -479,7 +494,7 @@ class APIGateway(ModelObj):
          self.spec.functions = synced_gateway.spec.functions
          self.spec.canary = synced_gateway.spec.canary
          self.spec.description = synced_gateway.spec.description
-         self.state = synced_gateway.state
+         self.status.state = synced_gateway.status.state

      def with_basic_auth(self, username: str, password: str):
          """
@@ -580,8 +595,8 @@ class APIGateway(ModelObj):
                  functions=functions,
                  canary=canary,
              ),
+             status=APIGatewayStatus(state=state),
          )
-         new_api_gateway.state = state
          return new_api_gateway

      def to_scheme(self) -> schemas.APIGateway:
@@ -625,6 +640,7 @@ class APIGateway(ModelObj):
                  ),
                  upstreams=upstreams,
              ),
+             status=schemas.APIGatewayStatus(state=self.status.state),
          )
          api_gateway.spec.authentication = self.spec.authentication.to_scheme()
          return api_gateway
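
With this refactor, gateway state moves from `APIGateway.state` to `APIGateway.status.state`. A minimal sketch of the new access pattern, assuming `gateway` is an existing `APIGateway` object:

    # is_ready() syncs with Nuclio if needed and compares status.state to "ready"
    if not gateway.is_ready():
        print("API gateway not ready, current state:", gateway.status.state)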
mlrun/runtimes/nuclio/application/application.py CHANGED
@@ -27,7 +27,7 @@ from mlrun.runtimes.nuclio.api_gateway import (
      APIGatewaySpec,
  )
  from mlrun.runtimes.nuclio.function import NuclioSpec, NuclioStatus
- from mlrun.utils import logger
+ from mlrun.utils import logger, update_in


  class ApplicationSpec(NuclioSpec):
@@ -149,6 +149,7 @@ class ApplicationStatus(NuclioStatus):
          build_pod=None,
          container_image=None,
          application_image=None,
+         application_source=None,
          sidecar_name=None,
          api_gateway_name=None,
          api_gateway=None,
@@ -164,6 +165,7 @@ class ApplicationStatus(NuclioStatus):
              container_image=container_image,
          )
          self.application_image = application_image or None
+         self.application_source = application_source or None
          self.sidecar_name = sidecar_name or None
          self.api_gateway_name = api_gateway_name or None
          self.api_gateway = api_gateway or None
@@ -291,7 +293,7 @@ class ApplicationRuntime(RemoteRuntime):

          :return: True if the function is ready (deployed)
          """
-         if self.requires_build() or force_build:
+         if (self.requires_build() and not self.spec.image) or force_build:
              self._fill_credentials()
              self._build_application_image(
                  builder_env=builder_env,
@@ -317,15 +319,25 @@
          )

          super().deploy(
-             project,
-             tag,
-             verbose,
-             auth_info,
-             builder_env,
+             project=project,
+             tag=tag,
+             verbose=verbose,
+             auth_info=auth_info,
+             builder_env=builder_env,
+         )
+         logger.info(
+             "Successfully deployed function, creating API gateway",
+             api_gateway_name=self.status.api_gateway_name,
+             authentication_mode=authentication_mode,
          )

+         # Restore the source in case it was removed to make nuclio not consider it when building
+         if not self.spec.build.source and self.status.application_source:
+             self.spec.build.source = self.status.application_source
+             self.save(versioned=False)
+
          ports = self.spec.internal_application_port if direct_port_access else []
-         self.create_api_gateway(
+         return self.create_api_gateway(
              name=self.status.api_gateway_name,
              ports=ports,
              authentication_mode=authentication_mode,
@@ -354,6 +366,31 @@
              target_dir=target_dir,
          )

+     def from_image(self, image):
+         """
+         Deploy the function with an existing nuclio processor image.
+         This applies only for the reverse proxy and not the application image.
+
+         :param image: image name
+         """
+         super().from_image(image)
+         # nuclio implementation detail - when providing the image and emptying out the source code and build source,
+         # nuclio skips rebuilding the image and simply takes the prebuilt image
+         self.spec.build.functionSourceCode = ""
+         self.status.application_source = self.spec.build.source
+         self.spec.build.source = ""
+
+         # save the image in the status, so we won't repopulate the function source code
+         self.status.container_image = image
+
+         # ensure golang runtime and handler for the reverse proxy
+         self.spec.nuclio_runtime = "golang"
+         update_in(
+             self.spec.base_spec,
+             "spec.handler",
+             "main:Handler",
+         )
+
      @classmethod
      def get_filename_and_handler(cls) -> (str, str):
          reverse_proxy_file_path = pathlib.Path(__file__).parent / "reverse_proxy.go"
@@ -409,6 +446,9 @@
          self.status.api_gateway.wait_for_readiness()
          self.url = self.status.api_gateway.invoke_url

+         logger.info("Successfully created API gateway", url=self.url)
+         return self.url
+
      def invoke(
          self,
          path: str,
@@ -448,6 +488,14 @@
              **http_client_kwargs,
          )

+     def _run(self, runobj: "mlrun.RunObject", execution):
+         raise mlrun.runtimes.RunError(
+             "Application runtime .run() is not yet supported. Use .invoke() instead."
+         )
+
+     def _enrich_command_from_status(self):
+         pass
+
      def _build_application_image(
          self,
          builder_env: dict = None,
@@ -506,9 +554,6 @@

          if self.status.container_image:
              self.from_image(self.status.container_image)
-             # nuclio implementation detail - when providing the image and emptying out the source code,
-             # nuclio skips rebuilding the image and simply takes the prebuilt image
-             self.spec.build.functionSourceCode = ""

          self.status.sidecar_name = f"{self.metadata.name}-sidecar"
          self.with_sidecar(
@@ -521,6 +566,12 @@
          self.set_env("SIDECAR_PORT", self.spec.internal_application_port)
          self.set_env("SIDECAR_HOST", "http://localhost")

+         # configure the sidecar container as the default container for logging purposes
+         self.set_config(
+             "metadata.annotations",
+             {"kubectl.kubernetes.io/default-container": self.status.sidecar_name},
+         )
+
      def _sync_api_gateway(self):
          if not self.status.api_gateway_name:
              return
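
Taken together, `ApplicationRuntime.deploy()` now returns the API gateway invoke URL (propagated from `create_api_gateway()`), and the new `from_image()` override lets the reverse proxy reuse a prebuilt processor image. A hedged sketch, assuming `app` is an existing `ApplicationRuntime` instance and the deploy defaults apply:

    # reuse a prebuilt reverse-proxy image instead of rebuilding it
    # ("registry/app-proxy:latest" is a hypothetical image name)
    app.from_image("registry/app-proxy:latest")

    url = app.deploy()          # now returns the API gateway invoke URL
    response = app.invoke("/")  # requests are routed through the gateway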
mlrun/runtimes/nuclio/function.py CHANGED
@@ -446,6 +446,11 @@ class RemoteRuntime(KubeResource):
          return self

      def from_image(self, image):
+         """
+         Deploy the function with an existing nuclio processor image.
+
+         :param image: image name
+         """
          config = nuclio.config.new_config()
          update_in(
              config,
@@ -568,6 +573,9 @@
          # this also means that the function object will be updated with the function status
          self._wait_for_function_deployment(db, verbose=verbose)

+         return self._enrich_command_from_status()
+
+     def _enrich_command_from_status(self):
          # NOTE: on older mlrun versions & nuclio versions, function are exposed via NodePort
          # now, functions can be not exposed (using service type ClusterIP) and hence
          # for BC we first try to populate the external invocation url, and then
mlrun/runtimes/nuclio/serving.py CHANGED
@@ -325,12 +325,12 @@ class ServingRuntime(RemoteRuntime):
          :param enable_tracking: Enabled/Disable model-monitoring tracking.
                                  Default True (tracking enabled).

-         example::
+         Example::

-                 # initialize a new serving function
-                 serving_fn = mlrun.import_function("hub://v2-model-server", new_name="serving")
-                 # apply model monitoring
-                 serving_fn.set_tracking()
+             # initialize a new serving function
+             serving_fn = mlrun.import_function("hub://v2-model-server", new_name="serving")
+             # apply model monitoring
+             serving_fn.set_tracking()

          """
          # Applying model monitoring configurations
@@ -480,7 +480,7 @@ class ServingRuntime(RemoteRuntime):
          trigger_args = stream.trigger_args or {}

          engine = self.spec.graph.engine or "async"
-         if mlrun.mlconf.is_explicit_ack() and engine == "async":
+         if mlrun.mlconf.is_explicit_ack_enabled() and engine == "async":
              trigger_args["explicit_ack_mode"] = trigger_args.get(
                  "explicit_ack_mode", "explicitOnly"
              )
mlrun/runtimes/pod.py CHANGED
@@ -215,9 +215,7 @@ class KubeResourceSpec(FunctionSpec):
              image_pull_secret or mlrun.mlconf.function.spec.image_pull_secret.default
          )
          self.node_name = node_name
-         self.node_selector = (
-             node_selector or mlrun.mlconf.get_default_function_node_selector()
-         )
+         self.node_selector = node_selector or {}
          self._affinity = affinity
          self.priority_class_name = (
              priority_class_name or mlrun.mlconf.default_function_priority_class_name
@@ -532,7 +530,7 @@ class KubeResourceSpec(FunctionSpec):
              return

          # merge node selectors - precedence to existing node selector
-         self.node_selector = mlrun.utils.helpers.merge_with_precedence(
+         self.node_selector = mlrun.utils.helpers.merge_dicts_with_precedence(
              node_selector, self.node_selector
          )

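The renamed helper keeps the same semantics: later arguments win, so the function's own node selector overrides keys from the selector passed in. A small sketch with illustrative values:

    from mlrun.utils.helpers import merge_dicts_with_precedence

    project_selector = {"zone": "us-east-1", "gpu": "false"}
    function_selector = {"gpu": "true"}

    # the second (function-level) selector takes precedence on conflicts
    merged = merge_dicts_with_precedence(project_selector, function_selector)
    assert merged == {"zone": "us-east-1", "gpu": "true"}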
mlrun/serving/server.py CHANGED
@@ -321,9 +321,9 @@ def v2_serving_init(context, namespace=None):
      server.http_trigger = getattr(context.trigger, "kind", "http") == "http"
      context.logger.info_with(
          "Setting current function",
-         current_functiton=os.environ.get("SERVING_CURRENT_FUNCTION", ""),
+         current_function=os.getenv("SERVING_CURRENT_FUNCTION", ""),
      )
-     server.set_current_function(os.environ.get("SERVING_CURRENT_FUNCTION", ""))
+     server.set_current_function(os.getenv("SERVING_CURRENT_FUNCTION", ""))
      context.logger.info_with(
          "Initializing states", namespace=namespace or get_caller_globals()
      )
@@ -344,9 +344,14 @@
      if server.verbose:
          context.logger.info(server.to_yaml())

-     if hasattr(context, "platform") and hasattr(
-         context.platform, "set_termination_callback"
-     ):
+     _set_callbacks(server, context)
+
+
+ def _set_callbacks(server, context):
+     if not server.graph.supports_termination() or not hasattr(context, "platform"):
+         return
+
+     if hasattr(context.platform, "set_termination_callback"):
          context.logger.info(
              "Setting termination callback to terminate graph on worker shutdown"
          )
@@ -358,7 +363,7 @@

          context.platform.set_termination_callback(termination_callback)

-     if hasattr(context, "platform") and hasattr(context.platform, "set_drain_callback"):
+     if hasattr(context.platform, "set_drain_callback"):
          context.logger.info(
              "Setting drain callback to terminate and restart the graph on a drain event (such as rebalancing)"
          )
@@ -417,7 +422,7 @@ def create_graph_server(
      parameters = parameters or {}
      server = GraphServer(graph, parameters, load_mode, verbose=verbose, **kwargs)
      server.set_current_function(
-         current_function or os.environ.get("SERVING_CURRENT_FUNCTION", "")
+         current_function or os.getenv("SERVING_CURRENT_FUNCTION", "")
      )
      return server

mlrun/serving/states.py CHANGED
@@ -27,6 +27,8 @@ from copy import copy, deepcopy
  from inspect import getfullargspec, signature
  from typing import Any, Union

+ import storey.utils
+
  import mlrun

  from ..config import config
@@ -386,6 +388,9 @@ class BaseStep(ModelObj):
          """
          raise NotImplementedError("set_flow() can only be called on a FlowStep")

+     def supports_termination(self):
+         return False
+

  class TaskStep(BaseStep):
      """task execution step, runs a class or handler"""
@@ -867,7 +872,9 @@ class QueueStep(BaseStep):
              return event

          if self._stream:
-             self._stream.push({"id": event.id, "body": data, "path": event.path})
+             if self.options.get("full_event", True):
+                 data = storey.utils.wrap_event_for_serialization(event, data)
+             self._stream.push(data)
              event.terminated = True
              event.body = None
              return event
@@ -1273,6 +1280,8 @@ class FlowStep(BaseStep):
              event.body = {"id": event.id}
              return event

+         event = storey.utils.unpack_event_if_wrapped(event)
+
          if len(self._start_steps) == 0:
              return event
          next_obj = self._start_steps[0]
@@ -1380,6 +1389,9 @@

          return step

+     def supports_termination(self):
+         return self.engine == "async"
+

  class RootFlowStep(FlowStep):
      """root flow step"""
@@ -1672,7 +1684,9 @@ def _init_async_objects(context, steps):
          wait_for_result = True

      source_args = context.get_param("source_args", {})
-     explicit_ack = is_explicit_ack_supported(context) and mlrun.mlconf.is_explicit_ack()
+     explicit_ack = (
+         is_explicit_ack_supported(context) and mlrun.mlconf.is_explicit_ack_enabled()
+     )

      # TODO: Change to AsyncEmitSource once we can drop support for nuclio<1.12.10
      default_source = storey.SyncEmitSource(
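
The new `supports_termination()` hook is what gates the termination/drain callbacks in `_set_callbacks` (see mlrun/serving/server.py above): only async flow graphs opt in. A minimal sketch:

    from mlrun.serving.states import RootFlowStep

    flow = RootFlowStep()
    flow.engine = "async"
    assert flow.supports_termination()

    flow.engine = "sync"
    assert not flow.supports_termination()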
mlrun/utils/db.py CHANGED
@@ -28,6 +28,9 @@ class BaseModel:
          columns = [column.key for column in mapper.columns if column.key not in exclude]

          def get_key_value(c):
+             # all (never say never) DB classes have "object" defined as "full_object"
+             if c == "object":
+                 c = "full_object"
              if isinstance(getattr(self, c), datetime):
                  return c, getattr(self, c).isoformat()
              return c, getattr(self, c)
mlrun/utils/helpers.py CHANGED
@@ -111,13 +111,11 @@ def get_artifact_target(item: dict, project=None):
      tree = item["metadata"].get("tree")
      tag = item["metadata"].get("tag")

-     kind = item.get("kind")
-     if kind in ["dataset", "model", "artifact"] and db_key:
+     if item.get("kind") in {"dataset", "model", "artifact"} and db_key:
          target = f"{DB_SCHEMA}://{StorePrefix.Artifact}/{project_str}/{db_key}"
-         if tag:
-             target = f"{target}:{tag}"
+         target += f":{tag}" if tag else ":latest"
          if tree:
-             target = f"{target}@{tree}"
+             target += f"@{tree}"
          return target

      return item["spec"].get("target_path")
@@ -1620,28 +1618,25 @@ def additional_filters_warning(additional_filters, class_name):
      )


- def merge_with_precedence(first_dict: dict, second_dict: dict) -> dict:
+ def merge_dicts_with_precedence(*dicts: dict) -> dict:
      """
-     Merge two dictionaries with precedence given to keys from the second dictionary.
+     Merge multiple dictionaries with precedence given to keys from later dictionaries.

-     This function merges two dictionaries, `first_dict` and `second_dict`, where keys from `second_dict`
-     take precedence in case of conflicts. If both dictionaries contain the same key,
-     the value from `second_dict` will overwrite the value from `first_dict`.
+     This function merges an arbitrary number of dictionaries, where keys from dictionaries later
+     in the argument list take precedence over keys from dictionaries earlier in the list. If all
+     dictionaries contain the same key, the value from the last dictionary with that key will
+     overwrite the values from earlier dictionaries.

      Example:
          >>> first_dict = {"key1": "value1", "key2": "value2"}
          >>> second_dict = {"key2": "new_value2", "key3": "value3"}
-         >>> merge_with_precedence(first_dict, second_dict)
-         {'key1': 'value1', 'key2': 'new_value2', 'key3': 'value3'}
+         >>> third_dict = {"key3": "new_value3", "key4": "value4"}
+         >>> merge_dicts_with_precedence(first_dict, second_dict, third_dict)
+         {'key1': 'value1', 'key2': 'new_value2', 'key3': 'new_value3', 'key4': 'value4'}

-     Note:
-     - The merge operation uses the ** operator in Python, which combines key-value pairs
-     from each dictionary. Later dictionaries take precedence when there are conflicting keys.
+     - If no dictionaries are provided, the function returns an empty dictionary.
      """
-     return {
-         **(first_dict or {}),
-         **(second_dict or {}),
-     }
+     return {k: v for d in dicts if d for k, v in d.items()}


  def validate_component_version_compatibility(
@@ -1707,6 +1702,22 @@ def is_parquet_file(file_path, format_=None):
      )


+ def validate_single_def_handler(function_kind: str, code: str):
+     # The name of MLRun's wrapper is 'handler', which is why the handler function name cannot be 'handler'
+     # it would override MLRun's wrapper
+     if function_kind == "mlrun":
+         # Find all lines that start with "def handler("
+         pattern = re.compile(r"^def handler\(", re.MULTILINE)
+         matches = pattern.findall(code)
+
+         # Only MLRun's wrapper handler (footer) can be in the code
+         if len(matches) > 1:
+             raise mlrun.errors.MLRunInvalidArgumentError(
+                 "The code file contains a function named “handler“, which is reserved. "
+                 + "Use a different name for your function."
+             )
+
+
  def _reload(module, max_recursion_depth):
      """Recursively reload modules."""
      if max_recursion_depth <= 0:
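
A sketch of what the new `validate_single_def_handler` check rejects: by the time it runs in `code_to_function`, MLRun's own wrapper footer already defines `handler`, so a second top-level `def handler(` coming from user code raises:

    import mlrun.errors
    from mlrun.utils.helpers import validate_single_def_handler

    code = (
        "def handler(context, event):\n"
        "    return 'user function'\n"
        "\n"
        "def handler(context, event):\n"
        "    return 'mlrun wrapper footer'\n"
    )
    try:
        validate_single_def_handler(function_kind="mlrun", code=code)
    except mlrun.errors.MLRunInvalidArgumentError as exc:
        print(exc)  # the name "handler" is reserved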
mlrun/utils/notifications/notification/webhook.py CHANGED
@@ -60,7 +60,14 @@ class WebhookNotification(NotificationBase):
              request_body["runs"] = runs

          if alert:
-             request_body["alert"] = alert.dict()
+             request_body["name"] = alert.name
+             request_body["project"] = alert.project
+             request_body["severity"] = alert.severity
+             if alert.summary:
+                 request_body["summary"] = mlrun.utils.helpers.format_alert_summary(
+                     alert, event_data
+                 )
+
          if event_data:
              request_body["value"] = event_data.value_dict
              request_body["id"] = event_data.entity.ids[0]
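
Instead of serializing the whole alert object, the webhook payload now carries selected alert fields. An illustrative shape of the resulting `request_body` (all values hypothetical):

    request_body = {
        "name": "cpu-alert",      # alert.name
        "project": "my-project",  # alert.project
        "severity": "high",       # alert.severity
        # set only when alert.summary is defined, formatted via format_alert_summary:
        "summary": "cpu-alert triggered on my-project",
    }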
mlrun/utils/version/version.json CHANGED
@@ -1,4 +1,4 @@
  {
-   "git_commit": "bf08d974e49838643bae063182b41e12f05f1a2b",
-   "version": "1.7.0-rc33"
+   "git_commit": "b2082382ddf7988e610a7ab6f9ea1a0ff8da863c",
+   "version": "1.7.0-rc35"
  }
{mlrun-1.7.0rc33.dist-info → mlrun-1.7.0rc35.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mlrun
- Version: 1.7.0rc33
+ Version: 1.7.0rc35
  Summary: Tracking and config of machine learning runs
  Home-page: https://github.com/mlrun/mlrun
  Author: Yaron Haviv
@@ -43,7 +43,7 @@ Requires-Dist: semver ~=3.0
  Requires-Dist: dependency-injector ~=4.41
  Requires-Dist: fsspec <2024.4,>=2023.9.2
  Requires-Dist: v3iofs ~=0.1.17
- Requires-Dist: storey ~=1.7.20
+ Requires-Dist: storey ~=1.7.23
  Requires-Dist: inflection ~=0.5.0
  Requires-Dist: python-dotenv ~=0.17.0
  Requires-Dist: setuptools ~=71.0
@@ -68,6 +68,7 @@ Requires-Dist: dask ~=2023.9.0 ; extra == 'all'
  Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'all'
  Requires-Dist: distributed ~=2023.9.0 ; extra == 'all'
  Requires-Dist: gcsfs <2024.4,>=2023.9.2 ; extra == 'all'
+ Requires-Dist: google-cloud-bigquery-storage ~=2.17 ; extra == 'all'
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'all'
  Requires-Dist: google-cloud-storage ==2.14.0 ; extra == 'all'
  Requires-Dist: google-cloud ==0.34 ; extra == 'all'
@@ -179,6 +180,7 @@ Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'databricks-sdk'
  Provides-Extra: google-cloud
  Requires-Dist: google-cloud-storage ==2.14.0 ; extra == 'google-cloud'
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'google-cloud'
+ Requires-Dist: google-cloud-bigquery-storage ~=2.17 ; extra == 'google-cloud'
  Requires-Dist: google-cloud ==0.34 ; extra == 'google-cloud'
  Provides-Extra: google-cloud-bigquery
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'google-cloud-bigquery'