mlrun 1.10.0rc8__py3-none-any.whl → 1.10.0rc10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic; see the registry's advisory page for more details.

Files changed (44)
  1. mlrun/common/constants.py +1 -0
  2. mlrun/common/db/dialects.py +25 -0
  3. mlrun/common/schemas/__init__.py +1 -0
  4. mlrun/common/schemas/function.py +1 -0
  5. mlrun/common/schemas/model_monitoring/model_endpoints.py +8 -0
  6. mlrun/common/schemas/partition.py +13 -3
  7. mlrun/common/schemas/workflow.py +7 -0
  8. mlrun/datastore/utils.py +0 -1
  9. mlrun/db/__init__.py +1 -0
  10. mlrun/db/base.py +17 -0
  11. mlrun/db/nopdb.py +9 -0
  12. mlrun/db/sql_types.py +160 -0
  13. mlrun/frameworks/tf_keras/mlrun_interface.py +4 -1
  14. mlrun/frameworks/tf_keras/model_handler.py +23 -3
  15. mlrun/launcher/base.py +0 -1
  16. mlrun/launcher/client.py +0 -1
  17. mlrun/launcher/local.py +0 -4
  18. mlrun/model_monitoring/applications/base.py +21 -1
  19. mlrun/model_monitoring/applications/context.py +2 -1
  20. mlrun/projects/__init__.py +1 -0
  21. mlrun/projects/pipelines.py +36 -0
  22. mlrun/projects/project.py +0 -13
  23. mlrun/runtimes/daskjob.py +0 -2
  24. mlrun/runtimes/kubejob.py +0 -4
  25. mlrun/runtimes/mpijob/abstract.py +0 -2
  26. mlrun/runtimes/mpijob/v1.py +0 -2
  27. mlrun/runtimes/nuclio/function.py +0 -2
  28. mlrun/runtimes/nuclio/serving.py +0 -46
  29. mlrun/runtimes/pod.py +0 -3
  30. mlrun/runtimes/remotesparkjob.py +0 -2
  31. mlrun/runtimes/sparkjob/spark3job.py +0 -2
  32. mlrun/serving/routers.py +17 -13
  33. mlrun/serving/server.py +3 -97
  34. mlrun/serving/system_steps.py +2 -1
  35. mlrun/serving/v2_serving.py +2 -2
  36. mlrun/utils/helpers.py +1 -1
  37. mlrun/utils/version/version.json +2 -2
  38. {mlrun-1.10.0rc8.dist-info → mlrun-1.10.0rc10.dist-info}/METADATA +15 -12
  39. {mlrun-1.10.0rc8.dist-info → mlrun-1.10.0rc10.dist-info}/RECORD +43 -42
  40. {mlrun-1.10.0rc8.dist-info → mlrun-1.10.0rc10.dist-info}/licenses/LICENSE +1 -1
  41. mlrun/common/db/sql_session.py +0 -79
  42. {mlrun-1.10.0rc8.dist-info → mlrun-1.10.0rc10.dist-info}/WHEEL +0 -0
  43. {mlrun-1.10.0rc8.dist-info → mlrun-1.10.0rc10.dist-info}/entry_points.txt +0 -0
  44. {mlrun-1.10.0rc8.dist-info → mlrun-1.10.0rc10.dist-info}/top_level.txt +0 -0
@@ -54,7 +54,6 @@ class MPIResourceSpec(KubeResourceSpec):
54
54
  preemption_mode=None,
55
55
  security_context=None,
56
56
  state_thresholds=None,
57
- serving_spec=None,
58
57
  ):
59
58
  super().__init__(
60
59
  command=command,
@@ -84,7 +83,6 @@ class MPIResourceSpec(KubeResourceSpec):
84
83
  preemption_mode=preemption_mode,
85
84
  security_context=security_context,
86
85
  state_thresholds=state_thresholds,
87
- serving_spec=serving_spec,
88
86
  )
89
87
  self.mpi_args = mpi_args or [
90
88
  "-x",
@@ -49,7 +49,6 @@ class MPIV1ResourceSpec(MPIResourceSpec):
49
49
  preemption_mode=None,
50
50
  security_context=None,
51
51
  state_thresholds=None,
52
- serving_spec=None,
53
52
  ):
54
53
  super().__init__(
55
54
  command=command,
@@ -80,7 +79,6 @@ class MPIV1ResourceSpec(MPIResourceSpec):
80
79
  preemption_mode=preemption_mode,
81
80
  security_context=security_context,
82
81
  state_thresholds=state_thresholds,
83
- serving_spec=serving_spec,
84
82
  )
85
83
  self.clean_pod_policy = clean_pod_policy or MPIJobV1CleanPodPolicies.default()
86
84
 
@@ -154,7 +154,6 @@ class NuclioSpec(KubeResourceSpec):
154
154
  add_templated_ingress_host_mode=None,
155
155
  state_thresholds=None,
156
156
  disable_default_http_trigger=None,
157
- serving_spec=None,
158
157
  ):
159
158
  super().__init__(
160
159
  command=command,
@@ -184,7 +183,6 @@ class NuclioSpec(KubeResourceSpec):
184
183
  preemption_mode=preemption_mode,
185
184
  security_context=security_context,
186
185
  state_thresholds=state_thresholds,
187
- serving_spec=serving_spec,
188
186
  )
189
187
 
190
188
  self.base_spec = base_spec or {}
@@ -42,8 +42,6 @@ from mlrun.serving.states import (
42
42
  )
43
43
  from mlrun.utils import get_caller_globals, logger, set_paths
44
44
 
45
- from .. import KubejobRuntime
46
- from ..pod import KubeResourceSpec
47
45
  from .function import NuclioSpec, RemoteRuntime, min_nuclio_versions
48
46
 
49
47
  serving_subkind = "serving_v2"
@@ -151,7 +149,6 @@ class ServingSpec(NuclioSpec):
151
149
  state_thresholds=None,
152
150
  disable_default_http_trigger=None,
153
151
  model_endpoint_creation_task_name=None,
154
- serving_spec=None,
155
152
  ):
156
153
  super().__init__(
157
154
  command=command,
@@ -192,7 +189,6 @@ class ServingSpec(NuclioSpec):
192
189
  service_type=service_type,
193
190
  add_templated_ingress_host_mode=add_templated_ingress_host_mode,
194
191
  disable_default_http_trigger=disable_default_http_trigger,
195
- serving_spec=serving_spec,
196
192
  )
197
193
 
198
194
  self.models = models or {}
@@ -707,7 +703,6 @@ class ServingRuntime(RemoteRuntime):
707
703
  "track_models": self.spec.track_models,
708
704
  "default_content_type": self.spec.default_content_type,
709
705
  "model_endpoint_creation_task_name": self.spec.model_endpoint_creation_task_name,
710
- "filename": getattr(self.spec, "filename", None),
711
706
  }
712
707
 
713
708
  if self.spec.secret_sources:
@@ -716,10 +711,6 @@ class ServingRuntime(RemoteRuntime):
716
711
 
717
712
  return json.dumps(serving_spec)
718
713
 
719
- @property
720
- def serving_spec(self):
721
- return self._get_serving_spec()
722
-
723
714
  def to_mock_server(
724
715
  self,
725
716
  namespace=None,
@@ -824,40 +815,3 @@ class ServingRuntime(RemoteRuntime):
824
815
  "Turn off the mock (mock=False) and make sure Nuclio is installed for real deployment to Nuclio"
825
816
  )
826
817
  self._mock_server = self.to_mock_server()
827
-
828
- def to_job(self) -> KubejobRuntime:
829
- """Convert this ServingRuntime to a KubejobRuntime, so that the graph can be run as a standalone job."""
830
- if self.spec.function_refs:
831
- raise mlrun.errors.MLRunInvalidArgumentError(
832
- f"Cannot convert function '{self.metadata.name}' to a job because it has child functions"
833
- )
834
-
835
- spec = KubeResourceSpec(
836
- image=self.spec.image,
837
- mode=self.spec.mode,
838
- volumes=self.spec.volumes,
839
- volume_mounts=self.spec.volume_mounts,
840
- env=self.spec.env,
841
- resources=self.spec.resources,
842
- default_handler="mlrun.serving.server.execute_graph",
843
- pythonpath=self.spec.pythonpath,
844
- entry_points=self.spec.entry_points,
845
- description=self.spec.description,
846
- workdir=self.spec.workdir,
847
- image_pull_secret=self.spec.image_pull_secret,
848
- node_name=self.spec.node_name,
849
- node_selector=self.spec.node_selector,
850
- affinity=self.spec.affinity,
851
- disable_auto_mount=self.spec.disable_auto_mount,
852
- priority_class_name=self.spec.priority_class_name,
853
- tolerations=self.spec.tolerations,
854
- preemption_mode=self.spec.preemption_mode,
855
- security_context=self.spec.security_context,
856
- state_thresholds=self.spec.state_thresholds,
857
- serving_spec=self._get_serving_spec(),
858
- )
859
- job = KubejobRuntime(
860
- spec=spec,
861
- metadata=self.metadata,
862
- )
863
- return job
mlrun/runtimes/pod.py CHANGED
@@ -103,7 +103,6 @@ class KubeResourceSpec(FunctionSpec):
103
103
  "preemption_mode",
104
104
  "security_context",
105
105
  "state_thresholds",
106
- "serving_spec",
107
106
  ]
108
107
  _default_fields_to_strip = FunctionSpec._default_fields_to_strip + [
109
108
  "volumes",
@@ -179,7 +178,6 @@ class KubeResourceSpec(FunctionSpec):
179
178
  preemption_mode=None,
180
179
  security_context=None,
181
180
  state_thresholds=None,
182
- serving_spec=None,
183
181
  ):
184
182
  super().__init__(
185
183
  command=command,
@@ -225,7 +223,6 @@ class KubeResourceSpec(FunctionSpec):
225
223
  state_thresholds
226
224
  or mlrun.mlconf.function.spec.state_thresholds.default.to_dict()
227
225
  )
228
- self.serving_spec = serving_spec
229
226
  # Termination grace period is internal for runtimes that have a pod termination hook hence it is not in the
230
227
  # _dict_fields and doesn't have a setter.
231
228
  self._termination_grace_period_seconds = None
@@ -58,7 +58,6 @@ class RemoteSparkSpec(KubeResourceSpec):
58
58
  preemption_mode=None,
59
59
  security_context=None,
60
60
  state_thresholds=None,
61
- serving_spec=None,
62
61
  ):
63
62
  super().__init__(
64
63
  command=command,
@@ -88,7 +87,6 @@ class RemoteSparkSpec(KubeResourceSpec):
88
87
  preemption_mode=preemption_mode,
89
88
  security_context=security_context,
90
89
  state_thresholds=state_thresholds,
91
- serving_spec=serving_spec,
92
90
  )
93
91
  self.provider = provider
94
92
 
@@ -168,7 +168,6 @@ class Spark3JobSpec(KubeResourceSpec):
168
168
  executor_cores=None,
169
169
  security_context=None,
170
170
  state_thresholds=None,
171
- serving_spec=None,
172
171
  ):
173
172
  super().__init__(
174
173
  command=command,
@@ -198,7 +197,6 @@ class Spark3JobSpec(KubeResourceSpec):
198
197
  preemption_mode=preemption_mode,
199
198
  security_context=security_context,
200
199
  state_thresholds=state_thresholds,
201
- serving_spec=serving_spec,
202
200
  )
203
201
 
204
202
  self.driver_resources = driver_resources or {}
mlrun/serving/routers.py CHANGED
@@ -80,10 +80,16 @@ class BaseModelRouter(RouterToDict):
80
80
  self._input_path = input_path
81
81
  self._result_path = result_path
82
82
  self._background_task_check_timestamp = None
83
- self._background_task_terminate = False
84
83
  self._background_task_current_state = None
85
84
  self.kwargs = kwargs
86
85
 
86
+ @property
87
+ def background_task_reached_terminal_state(self):
88
+ return (
89
+ self._background_task_current_state
90
+ and self._background_task_current_state != "running"
91
+ )
92
+
87
93
  def parse_event(self, event):
88
94
  parsed_event = {}
89
95
  try:
@@ -185,35 +191,33 @@ class BaseModelRouter(RouterToDict):
185
191
  background_task.status.state
186
192
  in mlrun.common.schemas.BackgroundTaskState.terminal_states()
187
193
  ):
188
- logger.debug(
194
+ logger.info(
189
195
  f"Model endpoint creation task completed with state {background_task.status.state}"
190
196
  )
191
- self._background_task_terminate = True
192
197
  else: # in progress
193
- logger.debug(
198
+ logger.info(
194
199
  f"Model endpoint creation task is still in progress with the current state: "
195
- f"{background_task.status.state}. Events will not be monitored for the next 15 seconds",
200
+ f"{background_task.status.state}. Events will not be monitored for the next "
201
+ f"{mlrun.mlconf.model_endpoint_monitoring.model_endpoint_creation_check_period} seconds",
196
202
  name=self.name,
197
203
  background_task_check_timestamp=self._background_task_check_timestamp.isoformat(),
198
204
  )
199
205
  return background_task.status.state
200
206
  else:
201
- logger.debug(
202
- "Model endpoint creation task name not provided",
207
+ logger.error(
208
+ "Model endpoint creation task name not provided. This function is not being monitored.",
203
209
  )
204
210
  elif self.context.monitoring_mock:
205
- self._background_task_terminate = (
206
- True # If mock monitoring we return success and terminate task check.
207
- )
208
211
  return mlrun.common.schemas.BackgroundTaskState.succeeded
209
- self._background_task_terminate = True # If mock without monitoring we return failed and terminate task check.
210
212
  return mlrun.common.schemas.BackgroundTaskState.failed
211
213
 
212
214
  def _update_background_task_state(self, event):
213
- if not self._background_task_terminate and (
215
+ if not self.background_task_reached_terminal_state and (
214
216
  self._background_task_check_timestamp is None
215
217
  or now_date() - self._background_task_check_timestamp
216
- >= timedelta(seconds=15)
218
+ >= timedelta(
219
+ seconds=mlrun.mlconf.model_endpoint_monitoring.model_endpoint_creation_check_period
220
+ )
217
221
  ):
218
222
  self._background_task_current_state = self._get_background_task_status()
219
223
  if event.body:
mlrun/serving/server.py CHANGED
@@ -21,9 +21,8 @@ import os
21
21
  import socket
22
22
  import traceback
23
23
  import uuid
24
- from typing import Any, Optional, Union
24
+ from typing import Optional, Union
25
25
 
26
- import storey
27
26
  from nuclio import Context as NuclioContext
28
27
  from nuclio.request import Logger as NuclioLogger
29
28
 
@@ -39,10 +38,9 @@ from mlrun.secrets import SecretsStore
39
38
 
40
39
  from ..common.helpers import parse_versioned_object_uri
41
40
  from ..common.schemas.model_monitoring.constants import FileTargetKind
42
- from ..datastore import DataItem, get_stream_pusher
41
+ from ..datastore import get_stream_pusher
43
42
  from ..datastore.store_resources import ResourceCache
44
43
  from ..errors import MLRunInvalidArgumentError
45
- from ..execution import MLClientCtx
46
44
  from ..model import ModelObj
47
45
  from ..utils import get_caller_globals
48
46
  from .states import (
@@ -324,11 +322,7 @@ class GraphServer(ModelObj):
324
322
 
325
323
  def _process_response(self, context, response, get_body):
326
324
  body = response.body
327
- if (
328
- isinstance(context, MLClientCtx)
329
- or isinstance(body, context.Response)
330
- or get_body
331
- ):
325
+ if isinstance(body, context.Response) or get_body:
332
326
  return body
333
327
 
334
328
  if body and not isinstance(body, (str, bytes)):
@@ -541,94 +535,6 @@ def v2_serving_init(context, namespace=None):
541
535
  _set_callbacks(server, context)
542
536
 
543
537
 
544
- async def async_execute_graph(
545
- context: MLClientCtx,
546
- data: DataItem,
547
- batching: bool,
548
- batch_size: Optional[int],
549
- ) -> list[Any]:
550
- spec = mlrun.utils.get_serving_spec()
551
-
552
- source_filename = spec.get("filename", None)
553
- namespace = {}
554
- if source_filename:
555
- with open(source_filename) as f:
556
- exec(f.read(), namespace)
557
-
558
- server = GraphServer.from_dict(spec)
559
-
560
- if config.log_level.lower() == "debug":
561
- server.verbose = True
562
- context.logger.info_with("Initializing states", namespace=namespace)
563
- kwargs = {}
564
- if hasattr(context, "is_mock"):
565
- kwargs["is_mock"] = context.is_mock
566
- server.init_states(
567
- context=None, # this context is expected to be a nuclio context, which we don't have in this flow
568
- namespace=namespace,
569
- **kwargs,
570
- )
571
- context.logger.info("Initializing graph steps")
572
- server.init_object(namespace)
573
-
574
- context.logger.info_with("Graph was initialized", verbose=server.verbose)
575
-
576
- if server.verbose:
577
- context.logger.info(server.to_yaml())
578
-
579
- df = data.as_df()
580
-
581
- responses = []
582
-
583
- async def run(body):
584
- event = storey.Event(id=index, body=body)
585
- response = await server.run(event, context)
586
- responses.append(response)
587
-
588
- if batching and not batch_size:
589
- batch_size = len(df)
590
-
591
- batch = []
592
- for index, row in df.iterrows():
593
- data = row.to_dict()
594
- if batching:
595
- batch.append(data)
596
- if len(batch) == batch_size:
597
- await run(batch)
598
- batch = []
599
- else:
600
- await run(data)
601
-
602
- if batch:
603
- await run(batch)
604
-
605
- termination_result = server.wait_for_completion()
606
- if asyncio.iscoroutine(termination_result):
607
- await termination_result
608
-
609
- return responses
610
-
611
-
612
- def execute_graph(
613
- context: MLClientCtx,
614
- data: DataItem,
615
- batching: bool = False,
616
- batch_size: Optional[int] = None,
617
- ) -> (list[Any], Any):
618
- """
619
- Execute graph as a job, from start to finish.
620
-
621
- :param context: The job's execution client context.
622
- :param data: The input data to the job, to be pushed into the graph row by row, or in batches.
623
- :param batching: Whether to push one or more batches into the graph rather than row by row.
624
- :param batch_size: The number of rows to push per batch. If not set, and batching=True, the entire dataset will
625
- be pushed into the graph in one batch.
626
-
627
- :return: A list of responses.
628
- """
629
- return asyncio.run(async_execute_graph(context, data, batching, batch_size))
630
-
631
-
632
538
  def _set_callbacks(server, context):
633
539
  if not server.graph.supports_termination() or not hasattr(context, "platform"):
634
540
  return
@@ -314,7 +314,8 @@ class BackgroundTaskStatus(storey.MapClass):
314
314
  else: # in progress
315
315
  logger.info(
316
316
  f"Model endpoint creation task is still in progress with the current state: "
317
- f"{background_task_state}. Events will not be monitored for the next 15 seconds",
317
+ f"{background_task_state}. Events will not be monitored for the next "
318
+ f"{mlrun.mlconf.model_endpoint_monitoring.model_endpoint_creation_check_period} seconds",
318
319
  name=self.name,
319
320
  background_task_check_timestamp=self._background_task_check_timestamp.isoformat(),
320
321
  )
@@ -508,8 +508,8 @@ class V2ModelServer(StepToDict):
508
508
  name=self.name,
509
509
  )
510
510
  else:
511
- logger.debug(
512
- "Model endpoint creation task name not provided",
511
+ logger.error(
512
+ "Model endpoint creation task name not provided. This function is not being monitored.",
513
513
  )
514
514
 
515
515
 
mlrun/utils/helpers.py CHANGED
@@ -911,7 +911,7 @@ def enrich_image_url(
911
911
  if is_mlrun_image and "mlrun/ml-base" in image_url:
912
912
  if tag:
913
913
  if mlrun.utils.helpers.validate_component_version_compatibility(
914
- "mlrun-client", "1.10.0", mlrun_client_version=tag
914
+ "mlrun-client", "1.10.0-rc0", mlrun_client_version=tag
915
915
  ):
916
916
  warnings.warn(
917
917
  "'mlrun/ml-base' image is deprecated in 1.10.0 and will be removed in 1.12.0, "
@@ -1,4 +1,4 @@
1
1
  {
2
- "git_commit": "fede26558b2c8db736315ad1f48e15e3ce2f387d",
3
- "version": "1.10.0-rc8"
2
+ "git_commit": "7711525d2af418d6991128a0d3253094584ddedc",
3
+ "version": "1.10.0-rc10"
4
4
  }
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mlrun
3
- Version: 1.10.0rc8
3
+ Version: 1.10.0rc10
4
4
  Summary: Tracking and config of machine learning runs
5
5
  Home-page: https://github.com/mlrun/mlrun
6
6
  Author: Yaron Haviv
@@ -9,7 +9,6 @@ License: Apache License 2.0
9
9
  Keywords: mlrun,mlops,data-science,machine-learning,experiment-tracking
10
10
  Classifier: Development Status :: 4 - Beta
11
11
  Classifier: Intended Audience :: Developers
12
- Classifier: License :: OSI Approved :: Apache Software License
13
12
  Classifier: Operating System :: POSIX :: Linux
14
13
  Classifier: Operating System :: Microsoft :: Windows
15
14
  Classifier: Operating System :: MacOS
@@ -85,11 +84,11 @@ Requires-Dist: avro~=1.11; extra == "kafka"
85
84
  Provides-Extra: redis
86
85
  Requires-Dist: redis~=4.3; extra == "redis"
87
86
  Provides-Extra: mlflow
88
- Requires-Dist: mlflow~=2.16; extra == "mlflow"
87
+ Requires-Dist: mlflow~=2.22; extra == "mlflow"
89
88
  Provides-Extra: databricks-sdk
90
89
  Requires-Dist: databricks-sdk~=0.20.0; extra == "databricks-sdk"
91
90
  Provides-Extra: sqlalchemy
92
- Requires-Dist: sqlalchemy~=1.4; extra == "sqlalchemy"
91
+ Requires-Dist: sqlalchemy~=2.0; extra == "sqlalchemy"
93
92
  Provides-Extra: dask
94
93
  Requires-Dist: dask~=2024.12.1; python_version >= "3.11" and extra == "dask"
95
94
  Requires-Dist: distributed~=2024.12.1; python_version >= "3.11" and extra == "dask"
@@ -112,8 +111,10 @@ Requires-Dist: objgraph~=3.6; extra == "api"
112
111
  Requires-Dist: igz-mgmt~=0.4.1; extra == "api"
113
112
  Requires-Dist: humanfriendly~=10.0; extra == "api"
114
113
  Requires-Dist: fastapi~=0.115.6; extra == "api"
115
- Requires-Dist: sqlalchemy~=1.4; extra == "api"
114
+ Requires-Dist: sqlalchemy~=2.0; extra == "api"
115
+ Requires-Dist: sqlalchemy-utils~=0.41.2; extra == "api"
116
116
  Requires-Dist: pymysql~=1.1; extra == "api"
117
+ Requires-Dist: psycopg2-binary~=2.9; extra == "api"
117
118
  Requires-Dist: alembic~=1.14; extra == "api"
118
119
  Requires-Dist: timelength~=1.1; extra == "api"
119
120
  Requires-Dist: memray~=1.12; sys_platform != "win32" and extra == "api"
@@ -141,7 +142,7 @@ Requires-Dist: google-cloud-storage==2.14.0; extra == "all"
141
142
  Requires-Dist: google-cloud==0.34; extra == "all"
142
143
  Requires-Dist: graphviz~=0.20.0; extra == "all"
143
144
  Requires-Dist: kafka-python~=2.1.0; extra == "all"
144
- Requires-Dist: mlflow~=2.16; extra == "all"
145
+ Requires-Dist: mlflow~=2.22; extra == "all"
145
146
  Requires-Dist: msrest~=0.6.21; extra == "all"
146
147
  Requires-Dist: oss2==2.18.1; extra == "all"
147
148
  Requires-Dist: ossfs==2023.12.0; extra == "all"
@@ -150,7 +151,7 @@ Requires-Dist: pyopenssl>=23; extra == "all"
150
151
  Requires-Dist: redis~=4.3; extra == "all"
151
152
  Requires-Dist: s3fs<2024.7,>=2023.9.2; extra == "all"
152
153
  Requires-Dist: snowflake-connector-python~=3.7; extra == "all"
153
- Requires-Dist: sqlalchemy~=1.4; extra == "all"
154
+ Requires-Dist: sqlalchemy~=2.0; extra == "all"
154
155
  Requires-Dist: taos-ws-py==0.3.2; extra == "all"
155
156
  Provides-Extra: complete
156
157
  Requires-Dist: adlfs==2023.9.0; extra == "complete"
@@ -172,7 +173,7 @@ Requires-Dist: google-cloud-storage==2.14.0; extra == "complete"
172
173
  Requires-Dist: google-cloud==0.34; extra == "complete"
173
174
  Requires-Dist: graphviz~=0.20.0; extra == "complete"
174
175
  Requires-Dist: kafka-python~=2.1.0; extra == "complete"
175
- Requires-Dist: mlflow~=2.16; extra == "complete"
176
+ Requires-Dist: mlflow~=2.22; extra == "complete"
176
177
  Requires-Dist: msrest~=0.6.21; extra == "complete"
177
178
  Requires-Dist: oss2==2.18.1; extra == "complete"
178
179
  Requires-Dist: ossfs==2023.12.0; extra == "complete"
@@ -181,7 +182,7 @@ Requires-Dist: pyopenssl>=23; extra == "complete"
181
182
  Requires-Dist: redis~=4.3; extra == "complete"
182
183
  Requires-Dist: s3fs<2024.7,>=2023.9.2; extra == "complete"
183
184
  Requires-Dist: snowflake-connector-python~=3.7; extra == "complete"
184
- Requires-Dist: sqlalchemy~=1.4; extra == "complete"
185
+ Requires-Dist: sqlalchemy~=2.0; extra == "complete"
185
186
  Requires-Dist: taos-ws-py==0.3.2; extra == "complete"
186
187
  Provides-Extra: complete-api
187
188
  Requires-Dist: adlfs==2023.9.0; extra == "complete-api"
@@ -212,20 +213,22 @@ Requires-Dist: humanfriendly~=10.0; extra == "complete-api"
212
213
  Requires-Dist: igz-mgmt~=0.4.1; extra == "complete-api"
213
214
  Requires-Dist: kafka-python~=2.1.0; extra == "complete-api"
214
215
  Requires-Dist: memray~=1.12; sys_platform != "win32" and extra == "complete-api"
215
- Requires-Dist: mlflow~=2.16; extra == "complete-api"
216
+ Requires-Dist: mlflow~=2.22; extra == "complete-api"
216
217
  Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.5.4; extra == "complete-api"
217
218
  Requires-Dist: msrest~=0.6.21; extra == "complete-api"
218
219
  Requires-Dist: objgraph~=3.6; extra == "complete-api"
219
220
  Requires-Dist: oss2==2.18.1; extra == "complete-api"
220
221
  Requires-Dist: ossfs==2023.12.0; extra == "complete-api"
221
222
  Requires-Dist: plotly~=5.23; extra == "complete-api"
223
+ Requires-Dist: psycopg2-binary~=2.9; extra == "complete-api"
222
224
  Requires-Dist: pydantic<2,>=1; extra == "complete-api"
223
225
  Requires-Dist: pymysql~=1.1; extra == "complete-api"
224
226
  Requires-Dist: pyopenssl>=23; extra == "complete-api"
225
227
  Requires-Dist: redis~=4.3; extra == "complete-api"
226
228
  Requires-Dist: s3fs<2024.7,>=2023.9.2; extra == "complete-api"
227
229
  Requires-Dist: snowflake-connector-python~=3.7; extra == "complete-api"
228
- Requires-Dist: sqlalchemy~=1.4; extra == "complete-api"
230
+ Requires-Dist: sqlalchemy-utils~=0.41.2; extra == "complete-api"
231
+ Requires-Dist: sqlalchemy~=2.0; extra == "complete-api"
229
232
  Requires-Dist: taos-ws-py==0.3.2; extra == "complete-api"
230
233
  Requires-Dist: timelength~=1.1; extra == "complete-api"
231
234
  Requires-Dist: uvicorn~=0.32.1; extra == "complete-api"
@@ -249,7 +252,7 @@ Dynamic: summary
249
252
  [![PyPI version fury.io](https://badge.fury.io/py/mlrun.svg)](https://pypi.python.org/pypi/mlrun/)
250
253
  [![Documentation](https://readthedocs.org/projects/mlrun/badge/?version=latest)](https://mlrun.readthedocs.io/en/latest/?badge=latest)
251
254
  [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
252
- ![GitHub commit activity](https://img.shields.io/github/commit-activity/w/mlrun/mlrun)
255
+ [![GitHub commit activity](https://img.shields.io/github/commit-activity/w/mlrun/mlrun)](https://github.com/mlrun/mlrun/commits/main)
253
256
  [![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/mlrun/mlrun?sort=semver)](https://github.com/mlrun/mlrun/releases)
254
257
  [![Join MLOps Live](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://mlopslive.slack.com)
255
258