mlrun 1.7.0rc16__py3-none-any.whl → 1.7.0rc18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (65)
  1. mlrun/alerts/alert.py +27 -24
  2. mlrun/artifacts/manager.py +5 -1
  3. mlrun/artifacts/model.py +1 -1
  4. mlrun/common/runtimes/constants.py +3 -0
  5. mlrun/common/schemas/__init__.py +8 -2
  6. mlrun/common/schemas/alert.py +49 -10
  7. mlrun/common/schemas/client_spec.py +1 -0
  8. mlrun/common/schemas/function.py +4 -0
  9. mlrun/common/schemas/model_monitoring/__init__.py +3 -1
  10. mlrun/common/schemas/model_monitoring/constants.py +21 -1
  11. mlrun/common/schemas/model_monitoring/grafana.py +9 -5
  12. mlrun/common/schemas/model_monitoring/model_endpoints.py +17 -6
  13. mlrun/common/schemas/project.py +3 -1
  14. mlrun/config.py +9 -3
  15. mlrun/data_types/to_pandas.py +5 -5
  16. mlrun/datastore/datastore.py +6 -2
  17. mlrun/datastore/redis.py +2 -2
  18. mlrun/datastore/s3.py +5 -0
  19. mlrun/datastore/sources.py +111 -6
  20. mlrun/datastore/targets.py +2 -2
  21. mlrun/db/base.py +6 -2
  22. mlrun/db/httpdb.py +22 -3
  23. mlrun/db/nopdb.py +10 -3
  24. mlrun/errors.py +6 -0
  25. mlrun/feature_store/retrieval/conversion.py +5 -5
  26. mlrun/feature_store/retrieval/job.py +3 -2
  27. mlrun/feature_store/retrieval/spark_merger.py +2 -1
  28. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +2 -2
  29. mlrun/lists.py +2 -0
  30. mlrun/model.py +8 -6
  31. mlrun/model_monitoring/db/stores/base/store.py +16 -3
  32. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +44 -43
  33. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +190 -91
  34. mlrun/model_monitoring/db/tsdb/__init__.py +35 -6
  35. mlrun/model_monitoring/db/tsdb/base.py +25 -18
  36. mlrun/model_monitoring/db/tsdb/tdengine/__init__.py +15 -0
  37. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +207 -0
  38. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +45 -0
  39. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +231 -0
  40. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +103 -64
  41. mlrun/model_monitoring/db/v3io_tsdb_reader.py +217 -16
  42. mlrun/model_monitoring/helpers.py +32 -0
  43. mlrun/model_monitoring/stream_processing.py +7 -4
  44. mlrun/model_monitoring/writer.py +19 -14
  45. mlrun/package/utils/_formatter.py +2 -2
  46. mlrun/projects/project.py +40 -11
  47. mlrun/render.py +8 -5
  48. mlrun/runtimes/__init__.py +1 -0
  49. mlrun/runtimes/databricks_job/databricks_wrapper.py +1 -1
  50. mlrun/runtimes/nuclio/api_gateway.py +97 -77
  51. mlrun/runtimes/nuclio/application/application.py +160 -7
  52. mlrun/runtimes/nuclio/function.py +18 -12
  53. mlrun/track/tracker.py +2 -1
  54. mlrun/utils/async_http.py +25 -5
  55. mlrun/utils/helpers.py +28 -3
  56. mlrun/utils/logger.py +11 -6
  57. mlrun/utils/notifications/notification/slack.py +27 -7
  58. mlrun/utils/notifications/notification_pusher.py +45 -41
  59. mlrun/utils/version/version.json +2 -2
  60. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/METADATA +8 -3
  61. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/RECORD +65 -61
  62. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/LICENSE +0 -0
  63. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/WHEEL +0 -0
  64. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/entry_points.txt +0 -0
  65. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/top_level.txt +0 -0
mlrun/track/tracker.py CHANGED
@@ -31,8 +31,9 @@ class Tracker(ABC):
     * Offline: Manually importing models and artifacts into an MLRun project using the `import_x` methods.
     """
 
+    @staticmethod
     @abstractmethod
-    def is_enabled(self) -> bool:
+    def is_enabled() -> bool:
         """
         Checks if tracker is enabled.
 
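With `is_enabled` now a `@staticmethod`, trackers answer the enablement question without an instance. A minimal sketch of a conforming subclass (the `DummyTracker` name and its body are illustrative, not part of mlrun; any other abstract members of `Tracker` would still need implementations before instantiation):

    from mlrun.track.tracker import Tracker

    class DummyTracker(Tracker):
        @staticmethod
        def is_enabled() -> bool:
            # illustrative only: real trackers typically consult config here
            return True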
mlrun/utils/async_http.py CHANGED
@@ -24,7 +24,7 @@ from aiohttp_retry import ExponentialRetry, RequestParams, RetryClient, RetryOpt
 from aiohttp_retry.client import _RequestContext
 
 from mlrun.config import config
-from mlrun.errors import err_to_str
+from mlrun.errors import err_to_str, raise_for_status
 
 from .helpers import logger as mlrun_logger
 
@@ -46,12 +46,21 @@ class AsyncClientWithRetry(RetryClient):
         *args,
         **kwargs,
     ):
+        # do not retry on PUT / PATCH as they might have side effects (not truly idempotent)
+        blacklisted_methods = (
+            blacklisted_methods
+            if blacklisted_methods is not None
+            else [
+                "POST",
+                "PUT",
+                "PATCH",
+            ]
+        )
         super().__init__(
             *args,
             retry_options=ExponentialRetryOverride(
                 retry_on_exception=retry_on_exception,
-                # do not retry on PUT / PATCH as they might have side effects (not truly idempotent)
-                blacklisted_methods=blacklisted_methods or ["POST", "PUT", "PATCH"],
+                blacklisted_methods=blacklisted_methods,
                 attempts=max_retries,
                 statuses=retry_on_status_codes,
                 factor=retry_backoff_factor,
@@ -63,6 +72,12 @@
             **kwargs,
         )
 
+    def methods_blacklist_update_required(self, new_blacklist: str):
+        self._retry_options: ExponentialRetryOverride
+        return set(self._retry_options.blacklisted_methods).difference(
+            set(new_blacklist)
+        )
+
     def _make_requests(
         self,
         params_list: list[RequestParams],
@@ -173,7 +188,7 @@ class _CustomRequestContext(_RequestContext):
                 last_attempt = current_attempt == self._retry_options.attempts
                 if self._is_status_code_ok(response.status) or last_attempt:
                     if self._raise_for_status:
-                        response.raise_for_status()
+                        raise_for_status(response)
 
                     self._response = response
                     return response
@@ -275,6 +290,11 @@
                 if isinstance(exc.os_error, exc_type):
                     return
         if exc.__cause__:
-            return self.verify_exception_type(exc.__cause__)
+            # If the cause exception is retriable, return, otherwise, raise the original exception
+            try:
+                self.verify_exception_type(exc.__cause__)
+            except Exception:
+                raise exc
+            return
         else:
             raise exc
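The constructor change is behavioral, not just cosmetic: the old `blacklisted_methods or [...]` fallback silently replaced an explicitly passed empty list with the default, whereas the `is not None` check preserves it. A rough usage sketch, assuming the remaining constructor arguments keep their defaults:

    from mlrun.utils.async_http import AsyncClientWithRetry

    # omitted/None -> default blacklist, so POST/PUT/PATCH are never retried
    default_client = AsyncClientWithRetry()

    # an explicit empty list now survives, making every method retryable
    retry_all_client = AsyncClientWithRetry(blacklisted_methods=[])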
mlrun/utils/helpers.py CHANGED
@@ -973,6 +973,15 @@ def get_ui_url(project, uid=None):
     return url
 
 
+def get_model_endpoint_url(project, model_name, model_endpoint_id):
+    url = ""
+    if mlrun.mlconf.resolve_ui_url():
+        url = f"{mlrun.mlconf.resolve_ui_url()}/{mlrun.mlconf.ui.projects_prefix}/{project}/models"
+        if model_name:
+            url += f"/model-endpoints/{model_name}/{model_endpoint_id}/overview"
+    return url
+
+
 def get_workflow_url(project, id=None):
     url = ""
     if mlrun.mlconf.resolve_ui_url():
@@ -1183,7 +1192,7 @@ def calculate_dataframe_hash(dataframe: pandas.DataFrame):
     return hashlib.sha1(pandas.util.hash_pandas_object(dataframe).values).hexdigest()
 
 
-def template_artifact_path(artifact_path, project, run_uid="project"):
+def template_artifact_path(artifact_path, project, run_uid=None):
     """
     Replace {{run.uid}} with the run uid and {{project}} with the project name in the artifact path.
     If no run uid is provided, the word `project` will be used instead as it is assumed to be a project
@@ -1191,6 +1200,7 @@ def template_artifact_path(artifact_path, project, run_uid="project"):
     """
     if not artifact_path:
         return artifact_path
+    run_uid = run_uid or "project"
     artifact_path = artifact_path.replace("{{run.uid}}", run_uid)
     artifact_path = _fill_project_path_template(artifact_path, project)
     return artifact_path
@@ -1572,13 +1582,19 @@ def validate_component_version_compatibility(
     component_current_version = None
     try:
         if component_name == "iguazio":
-            parsed_current_version = mlrun.mlconf.get_parsed_igz_version()
             component_current_version = mlrun.mlconf.igz_version
+            parsed_current_version = mlrun.mlconf.get_parsed_igz_version()
+
+            # ignore pre-release and build metadata, as iguazio version always has them, and we only care about the
+            # major, minor, and patch versions
+            parsed_current_version = semver.VersionInfo.parse(
+                f"{parsed_current_version.major}.{parsed_current_version.minor}.{parsed_current_version.patch}"
+            )
         if component_name == "nuclio":
+            component_current_version = mlrun.mlconf.nuclio_version
             parsed_current_version = semver.VersionInfo.parse(
                 mlrun.mlconf.nuclio_version
             )
-            component_current_version = mlrun.mlconf.nuclio_version
         if not parsed_current_version:
             return True
     except ValueError:
@@ -1597,3 +1613,12 @@
     if parsed_current_version < parsed_min_version:
         return False
     return True
+
+
+def format_alert_summary(
+    alert: mlrun.common.schemas.AlertConfig, event_data: mlrun.common.schemas.Event
+) -> str:
+    result = alert.summary.replace("{{project}}", alert.project)
+    result = result.replace("{{name}}", alert.name)
+    result = result.replace("{{entity}}", event_data.entity.ids[0])
+    return result
mlrun/utils/logger.py CHANGED
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import logging
+import typing
 from enum import Enum
 from sys import stdout
 from traceback import format_exception
@@ -221,11 +222,15 @@ class FormatterKinds(Enum):
     JSON = "json"
 
 
-def create_formatter_instance(formatter_kind: FormatterKinds) -> logging.Formatter:
+def resolve_formatter_by_kind(
+    formatter_kind: FormatterKinds,
+) -> type[
+    typing.Union[HumanReadableFormatter, HumanReadableExtendedFormatter, JSONFormatter]
+]:
     return {
-        FormatterKinds.HUMAN: HumanReadableFormatter(),
-        FormatterKinds.HUMAN_EXTENDED: HumanReadableExtendedFormatter(),
-        FormatterKinds.JSON: JSONFormatter(),
+        FormatterKinds.HUMAN: HumanReadableFormatter,
+        FormatterKinds.HUMAN_EXTENDED: HumanReadableExtendedFormatter,
+        FormatterKinds.JSON: JSONFormatter,
     }[formatter_kind]
 
 
@@ -243,11 +248,11 @@ def create_logger(
     logger_instance = Logger(level, name=name, propagate=False)
 
     # resolve formatter
-    formatter_instance = create_formatter_instance(
+    formatter_instance = resolve_formatter_by_kind(
         FormatterKinds(formatter_kind.lower())
     )
 
     # set handler
-    logger_instance.set_handler("default", stream or stdout, formatter_instance)
+    logger_instance.set_handler("default", stream or stdout, formatter_instance())
 
     return logger_instance
mlrun/utils/notifications/notification/slack.py CHANGED
@@ -32,6 +32,7 @@ class SlackNotification(NotificationBase):
         "completed": ":smiley:",
         "running": ":man-running:",
         "error": ":x:",
+        "skipped": ":zzz:",
     }
 
     async def push(
@@ -135,8 +136,16 @@
         line = [
             self._get_slack_row(f":bell: {alert.name} alert has occurred"),
             self._get_slack_row(f"*Project:*\n{alert.project}"),
-            self._get_slack_row(f"*UID:*\n{event_data.entity.ids[0]}"),
+            self._get_slack_row(f"*ID:*\n{event_data.entity.ids[0]}"),
         ]
+
+        if alert.summary:
+            line.append(
+                self._get_slack_row(
+                    f"*Summary:*\n{mlrun.utils.helpers.format_alert_summary(alert, event_data)}"
+                )
+            )
+
         if event_data.value_dict:
             data_lines = []
             for key, value in event_data.value_dict.items():
@@ -144,10 +153,21 @@
             data_text = "\n".join(data_lines)
             line.append(self._get_slack_row(f"*Event data:*\n{data_text}"))
 
-        if url := mlrun.utils.helpers.get_ui_url(
-            alert.project, event_data.entity.ids[0]
-        ):
-            line.append(self._get_slack_row(f"*Overview:*\n<{url}|*Job overview*>"))
+        if (
+            event_data.entity.kind == mlrun.common.schemas.alert.EventEntityKind.JOB
+        ):  # JOB entity
+            uid = event_data.value_dict.get("uid")
+            url = mlrun.utils.helpers.get_ui_url(alert.project, uid)
+            overview_type = "Job overview"
+        else:  # MODEL entity
+            model_name = event_data.value_dict.get("model")
+            model_endpoint_id = event_data.value_dict.get("model_endpoint_id")
+            url = mlrun.utils.helpers.get_model_endpoint_url(
+                alert.project, model_name, model_endpoint_id
+            )
+            overview_type = "Model endpoint"
+
+        line.append(self._get_slack_row(f"*Overview:*\n<{url}|*{overview_type}*>"))
 
         return line
 
@@ -157,11 +177,11 @@
 
         # Only show the URL if the run is not a function (serving or mlrun function)
         kind = run.get("step_kind")
-        if url and not kind or kind == "run":
+        state = run["status"].get("state", "")
+        if state != "skipped" and (url and not kind or kind == "run"):
             line = f'<{url}|*{meta.get("name")}*>'
         else:
             line = meta.get("name")
-        state = run["status"].get("state", "")
         if kind:
             line = f'{line} *({run.get("step_kind", run.get("kind", ""))})*'
         line = f'{self.emojis.get(state, ":question:")} {line}'
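The new overview-row branch reads entity-specific keys from `event_data.value_dict`; the shapes below are inferred from the `.get()` calls in the diff and are illustrative only:

    # JOB entity: the run UID drives the job-overview link
    job_event_values = {"uid": "8f14e45f"}

    # MODEL entity: model name and endpoint id drive the model-endpoint link
    model_event_values = {
        "model": "churn-model",
        "model_endpoint_id": "ep-123",
    }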
mlrun/utils/notifications/notification_pusher.py CHANGED
@@ -14,7 +14,6 @@
 
 import asyncio
 import datetime
-import json
 import os
 import re
 import traceback
@@ -23,6 +22,7 @@ from concurrent.futures import ThreadPoolExecutor
 
 import kfp
 import mlrun_pipelines.common.ops
+import mlrun_pipelines.models
 
 import mlrun.common.runtimes.constants
 import mlrun.common.schemas
@@ -392,17 +392,29 @@ class NotificationPusher(_NotificationPusherBase):
         steps = []
         db = mlrun.get_run_db()
 
-        def _add_run_step(_node_name, _node_template, _step_kind):
-            _run = db.list_runs(
-                project=run.metadata.project,
-                labels=f"mlrun/runner-pod={_node_name}",
-            )[0]
-            _run["step_kind"] = _step_kind
+        def _add_run_step(_step: mlrun_pipelines.models.PipelineStep):
+            try:
+                _run = db.list_runs(
+                    project=run.metadata.project,
+                    labels=f"mlrun/runner-pod={_step.node_name}",
+                )[0]
+            except IndexError:
+                _run = {
+                    "metadata": {
+                        "name": _step.display_name,
+                        "project": run.metadata.project,
+                    },
+                }
+            _run["step_kind"] = _step.step_type
+            if _step.skipped:
+                _run.setdefault("status", {})["state"] = (
+                    mlrun.common.runtimes.constants.RunStates.skipped
+                )
             steps.append(_run)
 
-        def _add_deploy_function_step(_, _node_template, _step_kind):
+        def _add_deploy_function_step(_step: mlrun_pipelines.models.PipelineStep):
             project, name, hash_key = self._extract_function_uri(
-                _node_template["metadata"]["annotations"]["mlrun/function-uri"]
+                _step.get_annotation("mlrun/function-uri")
             )
             if name:
                 try:
@@ -419,16 +431,19 @@
                            "hash_key": hash_key,
                        },
                    }
-                function["status"] = {
-                    "state": mlrun.common.runtimes.constants.PodPhases.pod_phase_to_run_state(
-                        node["phase"]
-                    ),
-                }
+                pod_phase = _step.phase
+                if _step.skipped:
+                    state = mlrun.common.schemas.FunctionState.skipped
+                else:
+                    state = mlrun.common.runtimes.constants.PodPhases.pod_phase_to_run_state(
+                        pod_phase
+                    )
+                function["status"] = {"state": state}
                 if isinstance(function["metadata"].get("updated"), datetime.datetime):
                     function["metadata"]["updated"] = function["metadata"][
                         "updated"
                     ].isoformat()
-                function["step_kind"] = _step_kind
+                function["step_kind"] = _step.step_type
                 steps.append(function)
 
         step_methods = {
@@ -446,26 +461,10 @@
             return steps
 
         try:
-            workflow_nodes = sorted(
-                workflow_manifest["status"]["nodes"].items(),
-                key=lambda _node: _node[1]["finishedAt"],
-            )
-            for node_name, node in workflow_nodes:
-                if node["type"] != "Pod":
-                    # Skip the parent DAG node
-                    continue
-
-                node_template = next(
-                    template
-                    for template in workflow_manifest["spec"]["templates"]
-                    if template["name"] == node["templateName"]
-                )
-                step_type = node_template["metadata"]["annotations"].get(
-                    "mlrun/pipeline-step-type"
-                )
-                step_method = step_methods.get(step_type)
+            for step in workflow_manifest.get_steps():
+                step_method = step_methods.get(step.step_type)
                 if step_method:
-                    step_method(node_name, node_template, step_type)
+                    step_method(step)
             return steps
         except Exception:
             # If we fail to read the pipeline steps, we will return the list of runs that have the same workflow id
@@ -481,19 +480,24 @@
         )
 
     @staticmethod
-    def _get_workflow_manifest(workflow_id: str) -> typing.Optional[dict]:
-        kfp_client = kfp.Client(namespace=mlrun.mlconf.namespace)
+    def _get_workflow_manifest(
+        workflow_id: str,
+    ) -> typing.Optional[mlrun_pipelines.models.PipelineManifest]:
+        kfp_url = mlrun.mlconf.resolve_kfp_url(mlrun.mlconf.namespace)
+        if not kfp_url:
+            raise mlrun.errors.MLRunNotFoundError(
+                "KubeFlow Pipelines is not configured"
+            )
+
+        kfp_client = kfp.Client(host=kfp_url)
 
         # arbitrary timeout of 5 seconds, the workflow should be done by now
         kfp_run = kfp_client.wait_for_run_completion(workflow_id, 5)
         if not kfp_run:
             return None
 
-        kfp_run = kfp_run.to_dict()
-        try:
-            return json.loads(kfp_run["pipeline_runtime"]["workflow_manifest"])
-        except Exception:
-            return None
+        kfp_run = mlrun_pipelines.models.PipelineRun(kfp_run)
+        return kfp_run.workflow_manifest()
 
     def _extract_function_uri(self, function_uri: str) -> tuple[str, str, str]:
         """
mlrun/utils/version/version.json CHANGED
@@ -1,4 +1,4 @@
 {
-    "git_commit": "db779a53a32a737e10d538e2fef25695a962117a",
-    "version": "1.7.0-rc16"
+    "git_commit": "cf983306a4f164f1c0a4f3ccf666ba9448d09e2e",
+    "version": "1.7.0-rc18"
 }
{mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mlrun
-Version: 1.7.0rc16
+Version: 1.7.0rc18
 Summary: Tracking and config of machine learning runs
 Home-page: https://github.com/mlrun/mlrun
 Author: Yaron Haviv
@@ -28,7 +28,7 @@ Requires-Dist: aiohttp-retry ~=2.8
 Requires-Dist: click ~=8.1
 Requires-Dist: nest-asyncio ~=1.0
 Requires-Dist: ipython ~=8.10
-Requires-Dist: nuclio-jupyter ~=0.9.16
+Requires-Dist: nuclio-jupyter ~=0.9.17
 Requires-Dist: numpy <1.27.0,>=1.16.5
 Requires-Dist: pandas <2.2,>=1.2
 Requires-Dist: pyarrow <15,>=10.0
@@ -43,7 +43,7 @@ Requires-Dist: semver ~=3.0
 Requires-Dist: dependency-injector ~=4.41
 Requires-Dist: fsspec <2024.4,>=2023.9.2
 Requires-Dist: v3iofs ~=0.1.17
-Requires-Dist: storey ~=1.7.11
+Requires-Dist: storey ~=1.7.17
 Requires-Dist: inflection ~=0.5.0
 Requires-Dist: python-dotenv ~=0.17.0
 Requires-Dist: setuptools ~=69.1
@@ -82,6 +82,7 @@ Requires-Dist: pyopenssl >=23 ; extra == 'all'
 Requires-Dist: redis ~=4.3 ; extra == 'all'
 Requires-Dist: s3fs <2024.4,>=2023.9.2 ; extra == 'all'
 Requires-Dist: sqlalchemy ~=1.4 ; extra == 'all'
+Requires-Dist: taos-ws-py ~=0.3.2 ; extra == 'all'
 Provides-Extra: api
 Requires-Dist: uvicorn ~=0.27.1 ; extra == 'api'
 Requires-Dist: dask-kubernetes ~=0.11.0 ; extra == 'api'
@@ -129,6 +130,7 @@ Requires-Dist: pyopenssl >=23 ; extra == 'complete'
 Requires-Dist: redis ~=4.3 ; extra == 'complete'
 Requires-Dist: s3fs <2024.4,>=2023.9.2 ; extra == 'complete'
 Requires-Dist: sqlalchemy ~=1.4 ; extra == 'complete'
+Requires-Dist: taos-ws-py ~=0.3.2 ; extra == 'complete'
 Provides-Extra: complete-api
 Requires-Dist: adlfs ==2023.9.0 ; extra == 'complete-api'
 Requires-Dist: aiobotocore <2.8,>=2.5.0 ; extra == 'complete-api'
@@ -161,6 +163,7 @@ Requires-Dist: pyopenssl >=23 ; extra == 'complete-api'
 Requires-Dist: redis ~=4.3 ; extra == 'complete-api'
 Requires-Dist: s3fs <2024.4,>=2023.9.2 ; extra == 'complete-api'
 Requires-Dist: sqlalchemy ~=1.4 ; extra == 'complete-api'
+Requires-Dist: taos-ws-py ~=0.3.2 ; extra == 'complete-api'
 Requires-Dist: timelength ~=1.1 ; extra == 'complete-api'
 Requires-Dist: uvicorn ~=0.27.1 ; extra == 'complete-api'
 Provides-Extra: dask
@@ -193,6 +196,8 @@ Requires-Dist: aiobotocore <2.8,>=2.5.0 ; extra == 's3'
 Requires-Dist: s3fs <2024.4,>=2023.9.2 ; extra == 's3'
 Provides-Extra: sqlalchemy
 Requires-Dist: sqlalchemy ~=1.4 ; extra == 'sqlalchemy'
+Provides-Extra: tdengine
+Requires-Dist: taos-ws-py ~=0.3.2 ; extra == 'tdengine'
 
 <a id="top"></a>
 [![Build Status](https://github.com/mlrun/mlrun/actions/workflows/build.yaml/badge.svg?branch=development)](https://github.com/mlrun/mlrun/actions/workflows/build.yaml?query=branch%3Adevelopment)
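The new `tdengine` extra wires the TDengine connector's `taos-ws-py` dependency into pip, so pulling it in with this prerelease would look like:

    pip install "mlrun[tdengine]==1.7.0rc18"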