mlrun 1.7.0rc7__py3-none-any.whl → 1.7.0rc9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic; see the registry's advisory page for more details.

Files changed (52)
  1. mlrun/__main__.py +2 -0
  2. mlrun/common/schemas/__init__.py +3 -0
  3. mlrun/common/schemas/api_gateway.py +8 -1
  4. mlrun/common/schemas/hub.py +7 -9
  5. mlrun/common/schemas/model_monitoring/constants.py +1 -1
  6. mlrun/common/schemas/pagination.py +26 -0
  7. mlrun/common/schemas/project.py +15 -10
  8. mlrun/config.py +28 -10
  9. mlrun/datastore/__init__.py +3 -7
  10. mlrun/datastore/datastore_profile.py +19 -1
  11. mlrun/datastore/snowflake_utils.py +43 -0
  12. mlrun/datastore/sources.py +9 -26
  13. mlrun/datastore/targets.py +131 -11
  14. mlrun/datastore/utils.py +10 -5
  15. mlrun/db/base.py +44 -0
  16. mlrun/db/httpdb.py +122 -21
  17. mlrun/db/nopdb.py +107 -0
  18. mlrun/feature_store/api.py +3 -2
  19. mlrun/feature_store/retrieval/spark_merger.py +27 -23
  20. mlrun/frameworks/tf_keras/callbacks/logging_callback.py +1 -1
  21. mlrun/frameworks/tf_keras/mlrun_interface.py +2 -2
  22. mlrun/kfpops.py +2 -5
  23. mlrun/launcher/base.py +1 -1
  24. mlrun/launcher/client.py +2 -2
  25. mlrun/model_monitoring/helpers.py +3 -1
  26. mlrun/projects/pipelines.py +1 -1
  27. mlrun/projects/project.py +32 -21
  28. mlrun/run.py +5 -1
  29. mlrun/runtimes/__init__.py +16 -0
  30. mlrun/runtimes/base.py +4 -1
  31. mlrun/runtimes/kubejob.py +26 -121
  32. mlrun/runtimes/nuclio/api_gateway.py +58 -8
  33. mlrun/runtimes/nuclio/application/application.py +79 -1
  34. mlrun/runtimes/nuclio/application/reverse_proxy.go +9 -1
  35. mlrun/runtimes/nuclio/function.py +11 -8
  36. mlrun/runtimes/nuclio/serving.py +2 -2
  37. mlrun/runtimes/pod.py +145 -0
  38. mlrun/runtimes/utils.py +0 -28
  39. mlrun/serving/remote.py +2 -3
  40. mlrun/serving/routers.py +4 -3
  41. mlrun/serving/server.py +1 -1
  42. mlrun/serving/states.py +6 -9
  43. mlrun/serving/v2_serving.py +4 -3
  44. mlrun/utils/http.py +1 -1
  45. mlrun/utils/retryer.py +1 -0
  46. mlrun/utils/version/version.json +2 -2
  47. {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc9.dist-info}/METADATA +15 -15
  48. {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc9.dist-info}/RECORD +52 -50
  49. {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc9.dist-info}/LICENSE +0 -0
  50. {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc9.dist-info}/WHEEL +0 -0
  51. {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc9.dist-info}/entry_points.txt +0 -0
  52. {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc9.dist-info}/top_level.txt +0 -0
@@ -543,11 +543,16 @@ class RemoteRuntime(KubeResource):
543
543
  :param project: project name
544
544
  :param tag: function tag
545
545
  :param verbose: set True for verbose logging
546
- :param auth_info: service AuthInfo
546
+ :param auth_info: service AuthInfo (deprecated and ignored)
547
547
  :param builder_env: env vars dict for source archive config/credentials e.g. builder_env={"GIT_TOKEN": token}
548
548
  :param force_build: set True for force building the image
549
549
  """
550
- # todo: verify that the function name is normalized
550
+ if auth_info:
551
+ # TODO: remove in 1.9.0
552
+ warnings.warn(
553
+ "'auth_info' is deprecated for nuclio runtimes in 1.7.0 and will be removed in 1.9.0",
554
+ FutureWarning,
555
+ )
551
556
 
552
557
  old_http_session = getattr(self, "_http_session", None)
553
558
  if old_http_session:
@@ -570,9 +575,7 @@ class RemoteRuntime(KubeResource):
570
575
  self._fill_credentials()
571
576
  db = self._get_db()
572
577
  logger.info("Starting remote function deploy")
573
- data = db.remote_builder(
574
- self, False, builder_env=builder_env, force_build=force_build
575
- )
578
+ data = db.deploy_nuclio_function(func=self, builder_env=builder_env)
576
579
  self.status = data["data"].get("status")
577
580
  self._update_credentials_from_remote_build(data["data"])
578
581
 
@@ -613,7 +616,7 @@ class RemoteRuntime(KubeResource):
613
616
  int(mlrun.mlconf.httpdb.logs.nuclio.pull_deploy_status_default_interval)
614
617
  )
615
618
  try:
616
- text, last_log_timestamp = db.get_builder_status(
619
+ text, last_log_timestamp = db.get_nuclio_deploy_status(
617
620
  self, last_log_timestamp=last_log_timestamp, verbose=verbose
618
621
  )
619
622
  except mlrun.db.RunDBError:
@@ -995,10 +998,10 @@ class RemoteRuntime(KubeResource):
995
998
  ]
996
999
 
997
1000
  if command:
998
- sidecar["command"] = command
1001
+ sidecar["command"] = mlrun.utils.helpers.as_list(command)
999
1002
 
1000
1003
  if args:
1001
- sidecar["args"] = args
1004
+ sidecar["args"] = mlrun.utils.helpers.as_list(args)
1002
1005
 
1003
1006
  def _set_sidecar(self, name: str) -> dict:
1004
1007
  self.spec.config.setdefault("spec.sidecars", [])
@@ -491,9 +491,9 @@ class ServingRuntime(RemoteRuntime):
491
491
 
492
492
  if (
493
493
  stream.path.startswith("kafka://")
494
- or "kafka_bootstrap_servers" in stream.options
494
+ or "kafka_brokers" in stream.options
495
495
  ):
496
- brokers = stream.options.get("kafka_bootstrap_servers")
496
+ brokers = stream.options.get("kafka_brokers")
497
497
  if brokers:
498
498
  brokers = brokers.split(",")
499
499
  topic, brokers = parse_kafka_url(stream.path, brokers)
mlrun/runtimes/pod.py CHANGED
@@ -15,6 +15,7 @@ import copy
15
15
  import inspect
16
16
  import os
17
17
  import re
18
+ import time
18
19
  import typing
19
20
  from enum import Enum
20
21
 
@@ -1338,6 +1339,150 @@ class KubeResource(BaseRuntime):
1338
1339
 
1339
1340
  self.spec.validate_service_account(allowed_service_accounts)
1340
1341
 
1342
+ def _configure_mlrun_build_with_source(
1343
+ self, source, workdir=None, handler=None, pull_at_runtime=True, target_dir=None
1344
+ ):
1345
+ mlrun.utils.helpers.validate_builder_source(source, pull_at_runtime, workdir)
1346
+
1347
+ self.spec.build.source = source
1348
+ if handler:
1349
+ self.spec.default_handler = handler
1350
+ if workdir:
1351
+ self.spec.workdir = workdir
1352
+ if target_dir:
1353
+ self.spec.build.source_code_target_dir = target_dir
1354
+
1355
+ self.spec.build.load_source_on_run = pull_at_runtime
1356
+ if (
1357
+ self.spec.build.base_image
1358
+ and not self.spec.build.commands
1359
+ and pull_at_runtime
1360
+ and not self.spec.image
1361
+ ):
1362
+ # if we load source from repo and don't need a full build use the base_image as the image
1363
+ self.spec.image = self.spec.build.base_image
1364
+ elif not pull_at_runtime:
1365
+ # clear the image so build will not be skipped
1366
+ self.spec.build.base_image = self.spec.build.base_image or self.spec.image
1367
+ self.spec.image = ""
1368
+
1369
+ def _resolve_build_with_mlrun(self, with_mlrun: typing.Optional[bool] = None):
1370
+ build = self.spec.build
1371
+ if with_mlrun is None:
1372
+ if build.with_mlrun is not None:
1373
+ with_mlrun = build.with_mlrun
1374
+ else:
1375
+ with_mlrun = build.base_image and not (
1376
+ build.base_image.startswith("mlrun/")
1377
+ or "/mlrun/" in build.base_image
1378
+ )
1379
+ if (
1380
+ not build.source
1381
+ and not build.commands
1382
+ and not build.requirements
1383
+ and not build.extra
1384
+ and with_mlrun
1385
+ ):
1386
+ logger.info(
1387
+ "Running build to add mlrun package, set "
1388
+ "with_mlrun=False to skip if its already in the image"
1389
+ )
1390
+ return with_mlrun
1391
+
1392
+ def _build_image(
1393
+ self,
1394
+ builder_env,
1395
+ force_build,
1396
+ mlrun_version_specifier,
1397
+ show_on_failure,
1398
+ skip_deployed,
1399
+ watch,
1400
+ is_kfp,
1401
+ with_mlrun,
1402
+ ):
1403
+ # When we're in pipelines context we must watch otherwise the pipelines pod will exit before the operation
1404
+ # is actually done. (when a pipelines pod exits, the pipeline step marked as done)
1405
+ if is_kfp:
1406
+ watch = True
1407
+
1408
+ db = self._get_db()
1409
+ data = db.remote_builder(
1410
+ self,
1411
+ with_mlrun,
1412
+ mlrun_version_specifier,
1413
+ skip_deployed,
1414
+ builder_env=builder_env,
1415
+ force_build=force_build,
1416
+ )
1417
+ self.status = data["data"].get("status", None)
1418
+ self.spec.image = mlrun.utils.get_in(
1419
+ data, "data.spec.image"
1420
+ ) or mlrun.utils.get_in(data, "data.spec.build.image")
1421
+ self.spec.build.base_image = self.spec.build.base_image or mlrun.utils.get_in(
1422
+ data, "data.spec.build.base_image"
1423
+ )
1424
+ # Get the source target dir in case it was enriched due to loading source
1425
+ self.spec.build.source_code_target_dir = mlrun.utils.get_in(
1426
+ data, "data.spec.build.source_code_target_dir"
1427
+ ) or mlrun.utils.get_in(data, "data.spec.clone_target_dir")
1428
+ ready = data.get("ready", False)
1429
+ if not ready:
1430
+ logger.info(
1431
+ f"Started building image: {data.get('data', {}).get('spec', {}).get('build', {}).get('image')}"
1432
+ )
1433
+ if watch and not ready:
1434
+ state = self._build_watch(
1435
+ watch=watch,
1436
+ show_on_failure=show_on_failure,
1437
+ )
1438
+ ready = state == "ready"
1439
+ self.status.state = state
1440
+
1441
+ if watch and not ready:
1442
+ raise mlrun.errors.MLRunRuntimeError("Deploy failed")
1443
+ return ready
1444
+
1445
+ def _build_watch(
1446
+ self,
1447
+ watch: bool = True,
1448
+ logs: bool = True,
1449
+ show_on_failure: bool = False,
1450
+ ):
1451
+ db = self._get_db()
1452
+ offset = 0
1453
+ try:
1454
+ text, _ = db.get_builder_status(self, 0, logs=logs)
1455
+ except mlrun.db.RunDBError:
1456
+ raise ValueError("function or build process not found")
1457
+
1458
+ def print_log(text):
1459
+ if text and (
1460
+ not show_on_failure
1461
+ or self.status.state == mlrun.common.schemas.FunctionState.error
1462
+ ):
1463
+ print(text, end="")
1464
+
1465
+ print_log(text)
1466
+ offset += len(text)
1467
+ if watch:
1468
+ while self.status.state in [
1469
+ mlrun.common.schemas.FunctionState.pending,
1470
+ mlrun.common.schemas.FunctionState.running,
1471
+ ]:
1472
+ time.sleep(2)
1473
+ if show_on_failure:
1474
+ text = ""
1475
+ db.get_builder_status(self, 0, logs=False)
1476
+ if self.status.state == mlrun.common.schemas.FunctionState.error:
1477
+ # re-read the full log on failure
1478
+ text, _ = db.get_builder_status(self, offset, logs=logs)
1479
+ else:
1480
+ text, _ = db.get_builder_status(self, offset, logs=logs)
1481
+ print_log(text)
1482
+ offset += len(text)
1483
+
1484
+ return self.status.state
1485
+
1341
1486
 
1342
1487
  def _resolve_if_type_sanitized(attribute_name, attribute):
1343
1488
  attribute_config = sanitized_attributes[attribute_name]
mlrun/runtimes/utils.py CHANGED
@@ -417,34 +417,6 @@ def get_func_selector(project, name=None, tag=None):
417
417
  return s
418
418
 
419
419
 
420
- class k8s_resource:
421
- kind = ""
422
- per_run = False
423
- per_function = False
424
- k8client = None
425
-
426
- def deploy_function(self, function):
427
- pass
428
-
429
- def release_function(self, function):
430
- pass
431
-
432
- def submit_run(self, function, runobj):
433
- pass
434
-
435
- def get_object(self, name, namespace=None):
436
- return None
437
-
438
- def get_status(self, name, namespace=None):
439
- return None
440
-
441
- def del_object(self, name, namespace=None):
442
- pass
443
-
444
- def get_pods(self, name, namespace=None, master=False):
445
- return {}
446
-
447
-
448
420
  def enrich_function_from_dict(function, function_dict):
449
421
  override_function = mlrun.new_function(runtime=function_dict, kind=function.kind)
450
422
  for attribute in [
mlrun/serving/remote.py CHANGED
@@ -172,8 +172,7 @@ class RemoteStep(storey.SendToHttp):
172
172
  if not self._session:
173
173
  self._session = mlrun.utils.HTTPSessionWithRetry(
174
174
  self.retries,
175
- self.backoff_factor
176
- or mlrun.config.config.http_retry_defaults.backoff_factor,
175
+ self.backoff_factor or mlrun.mlconf.http_retry_defaults.backoff_factor,
177
176
  retry_on_exception=False,
178
177
  retry_on_status=self.retries > 0,
179
178
  retry_on_post=True,
@@ -185,7 +184,7 @@ class RemoteStep(storey.SendToHttp):
185
184
  resp = self._session.request(
186
185
  method,
187
186
  url,
188
- verify=mlrun.config.config.httpdb.http.verify,
187
+ verify=mlrun.mlconf.httpdb.http.verify,
189
188
  headers=headers,
190
189
  data=body,
191
190
  timeout=self.timeout,
mlrun/serving/routers.py CHANGED
@@ -28,6 +28,7 @@ import numpy as np
28
28
  import mlrun
29
29
  import mlrun.common.model_monitoring
30
30
  import mlrun.common.schemas.model_monitoring
31
+ from mlrun.errors import err_to_str
31
32
  from mlrun.utils import logger, now_date
32
33
 
33
34
  from ..common.helpers import parse_versioned_object_uri
@@ -1013,7 +1014,7 @@ def _init_endpoint_record(
1013
1014
  graph_server.function_uri
1014
1015
  )
1015
1016
  except Exception as e:
1016
- logger.error("Failed to parse function URI", exc=e)
1017
+ logger.error("Failed to parse function URI", exc=err_to_str(e))
1017
1018
  return None
1018
1019
 
1019
1020
  # Generating version model value based on the model name and model version
@@ -1089,12 +1090,12 @@ def _init_endpoint_record(
1089
1090
  except Exception as exc:
1090
1091
  logger.warning(
1091
1092
  "Failed creating model endpoint record",
1092
- exc=exc,
1093
+ exc=err_to_str(exc),
1093
1094
  traceback=traceback.format_exc(),
1094
1095
  )
1095
1096
 
1096
1097
  except Exception as e:
1097
- logger.error("Failed to retrieve model endpoint object", exc=e)
1098
+ logger.error("Failed to retrieve model endpoint object", exc=err_to_str(e))
1098
1099
 
1099
1100
  return endpoint_uid
1100
1101
 
mlrun/serving/server.py CHANGED
@@ -53,7 +53,7 @@ class _StreamContext:
53
53
  Initialize _StreamContext object.
54
54
  :param enabled: A boolean indication for applying the stream context
55
55
  :param parameters: Dictionary of optional parameters, such as `log_stream` and `stream_args`. Note that these
56
- parameters might be relevant to the output source such as `kafka_bootstrap_servers` if
56
+ parameters might be relevant to the output source such as `kafka_brokers` if
57
57
  the output source is from type Kafka.
58
58
  :param function_uri: Full value of the function uri, usually it's <project-name>/<function-name>
59
59
  """
mlrun/serving/states.py CHANGED
@@ -1524,21 +1524,18 @@ def _init_async_objects(context, steps):
1524
1524
  endpoint = None
1525
1525
  options = {}
1526
1526
  options.update(step.options)
1527
- kafka_bootstrap_servers = options.pop(
1528
- "kafka_bootstrap_servers", None
1529
- )
1530
- if stream_path.startswith("kafka://") or kafka_bootstrap_servers:
1531
- topic, bootstrap_servers = parse_kafka_url(
1532
- stream_path, kafka_bootstrap_servers
1533
- )
1527
+ kafka_brokers = options.pop("kafka_brokers", None)
1528
+ if stream_path.startswith("kafka://") or kafka_brokers:
1529
+ topic, brokers = parse_kafka_url(stream_path, kafka_brokers)
1534
1530
 
1535
1531
  kafka_producer_options = options.pop(
1536
- "kafka_producer_options", None
1532
+ "kafka_producer_options",
1533
+ options.pop("kafka_bootstrap_servers", None),
1537
1534
  )
1538
1535
 
1539
1536
  step._async_object = storey.KafkaTarget(
1540
1537
  topic=topic,
1541
- bootstrap_servers=bootstrap_servers,
1538
+ brokers=brokers,
1542
1539
  producer_options=kafka_producer_options,
1543
1540
  context=context,
1544
1541
  **options,
@@ -21,6 +21,7 @@ import mlrun.common.model_monitoring
21
21
  import mlrun.common.schemas.model_monitoring
22
22
  from mlrun.artifacts import ModelArtifact # noqa: F401
23
23
  from mlrun.config import config
24
+ from mlrun.errors import err_to_str
24
25
  from mlrun.utils import logger, now_date
25
26
 
26
27
  from ..common.helpers import parse_versioned_object_uri
@@ -523,7 +524,7 @@ def _init_endpoint_record(
523
524
  graph_server.function_uri
524
525
  )
525
526
  except Exception as e:
526
- logger.error("Failed to parse function URI", exc=e)
527
+ logger.error("Failed to parse function URI", exc=err_to_str(e))
527
528
  return None
528
529
 
529
530
  # Generating version model value based on the model name and model version
@@ -576,9 +577,9 @@ def _init_endpoint_record(
576
577
  )
577
578
 
578
579
  except Exception as e:
579
- logger.error("Failed to create endpoint record", exc=e)
580
+ logger.error("Failed to create endpoint record", exc=err_to_str(e))
580
581
 
581
582
  except Exception as e:
582
- logger.error("Failed to retrieve model endpoint object", exc=e)
583
+ logger.error("Failed to retrieve model endpoint object", exc=err_to_str(e))
583
584
 
584
585
  return uid
mlrun/utils/http.py CHANGED
@@ -122,7 +122,7 @@ class HTTPSessionWithRetry(requests.Session):
122
122
 
123
123
  self._logger.warning(
124
124
  "Error during request handling, retrying",
125
- exc=str(exc),
125
+ exc=err_to_str(exc),
126
126
  retry_count=retry_count,
127
127
  url=url,
128
128
  method=method,
mlrun/utils/retryer.py CHANGED
@@ -138,6 +138,7 @@ class Retryer:
138
138
  except mlrun.errors.MLRunFatalFailureError as exc:
139
139
  raise exc.original_exception
140
140
  except Exception as exc:
141
+ self.last_exception = exc
141
142
  return (
142
143
  None,
143
144
  self.last_exception,
@@ -1,4 +1,4 @@
1
1
  {
2
- "git_commit": "06b1879c4a1857b20f07e805c46f51aa4ac74cef",
3
- "version": "1.7.0-rc7"
2
+ "git_commit": "3396f9ef4c278411ca6227b1792ff7df20005a42",
3
+ "version": "1.7.0-rc9"
4
4
  }
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mlrun
3
- Version: 1.7.0rc7
3
+ Version: 1.7.0rc9
4
4
  Summary: Tracking and config of machine learning runs
5
5
  Home-page: https://github.com/mlrun/mlrun
6
6
  Author: Yaron Haviv
@@ -37,20 +37,20 @@ Requires-Dist: pyyaml ~=5.1
37
37
  Requires-Dist: requests ~=2.31
38
38
  Requires-Dist: tabulate ~=0.8.6
39
39
  Requires-Dist: v3io ~=0.6.4
40
- Requires-Dist: pydantic >=1.10.8,~=1.10
40
+ Requires-Dist: pydantic <1.10.15,>=1.10.8
41
41
  Requires-Dist: mergedeep ~=1.3
42
42
  Requires-Dist: v3io-frames ~=0.10.12
43
43
  Requires-Dist: semver ~=3.0
44
44
  Requires-Dist: dependency-injector ~=4.41
45
- Requires-Dist: fsspec ==2023.9.2
45
+ Requires-Dist: fsspec <2024.4,>=2023.9.2
46
46
  Requires-Dist: v3iofs ~=0.1.17
47
- Requires-Dist: storey ~=1.7.6
47
+ Requires-Dist: storey ~=1.7.7
48
48
  Requires-Dist: inflection ~=0.5.0
49
49
  Requires-Dist: python-dotenv ~=0.17.0
50
50
  Requires-Dist: setuptools ~=69.1
51
51
  Requires-Dist: deprecated ~=1.2
52
52
  Requires-Dist: jinja2 >=3.1.3,~=3.1
53
- Requires-Dist: orjson ~=3.9
53
+ Requires-Dist: orjson <4,>=3.9.15
54
54
  Provides-Extra: alibaba-oss
55
55
  Requires-Dist: ossfs ==2023.12.0 ; extra == 'alibaba-oss'
56
56
  Requires-Dist: oss2 ==2.18.1 ; extra == 'alibaba-oss'
@@ -66,7 +66,7 @@ Requires-Dist: boto3 <1.29.0,>=1.28.0 ; extra == 'all'
66
66
  Requires-Dist: dask ~=2023.9.0 ; extra == 'all'
67
67
  Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'all'
68
68
  Requires-Dist: distributed ~=2023.9.0 ; extra == 'all'
69
- Requires-Dist: gcsfs ==2023.9.2 ; extra == 'all'
69
+ Requires-Dist: gcsfs <2024.4,>=2023.9.2 ; extra == 'all'
70
70
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'all'
71
71
  Requires-Dist: google-cloud-storage ==2.14.0 ; extra == 'all'
72
72
  Requires-Dist: google-cloud ==0.34 ; extra == 'all'
@@ -79,14 +79,14 @@ Requires-Dist: ossfs ==2023.12.0 ; extra == 'all'
79
79
  Requires-Dist: plotly <5.12.0,~=5.4 ; extra == 'all'
80
80
  Requires-Dist: pyopenssl >=23 ; extra == 'all'
81
81
  Requires-Dist: redis ~=4.3 ; extra == 'all'
82
- Requires-Dist: s3fs ==2023.9.2 ; extra == 'all'
82
+ Requires-Dist: s3fs <2024.4,>=2023.9.2 ; extra == 'all'
83
83
  Requires-Dist: sqlalchemy ~=1.4 ; extra == 'all'
84
84
  Provides-Extra: api
85
85
  Requires-Dist: uvicorn ~=0.27.1 ; extra == 'api'
86
86
  Requires-Dist: dask-kubernetes ~=0.11.0 ; extra == 'api'
87
87
  Requires-Dist: apscheduler <4,>=3.10.3 ; extra == 'api'
88
88
  Requires-Dist: objgraph ~=3.6 ; extra == 'api'
89
- Requires-Dist: igz-mgmt ~=0.1.1 ; extra == 'api'
89
+ Requires-Dist: igz-mgmt ~=0.1.3 ; extra == 'api'
90
90
  Requires-Dist: humanfriendly ~=10.0 ; extra == 'api'
91
91
  Requires-Dist: fastapi ~=0.110.0 ; extra == 'api'
92
92
  Requires-Dist: sqlalchemy ~=1.4 ; extra == 'api'
@@ -115,7 +115,7 @@ Requires-Dist: boto3 <1.29.0,>=1.28.0 ; extra == 'complete'
115
115
  Requires-Dist: dask ~=2023.9.0 ; extra == 'complete'
116
116
  Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'complete'
117
117
  Requires-Dist: distributed ~=2023.9.0 ; extra == 'complete'
118
- Requires-Dist: gcsfs ==2023.9.2 ; extra == 'complete'
118
+ Requires-Dist: gcsfs <2024.4,>=2023.9.2 ; extra == 'complete'
119
119
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'complete'
120
120
  Requires-Dist: graphviz ~=0.20.0 ; extra == 'complete'
121
121
  Requires-Dist: kafka-python ~=2.0 ; extra == 'complete'
@@ -126,7 +126,7 @@ Requires-Dist: ossfs ==2023.12.0 ; extra == 'complete'
126
126
  Requires-Dist: plotly <5.12.0,~=5.4 ; extra == 'complete'
127
127
  Requires-Dist: pyopenssl >=23 ; extra == 'complete'
128
128
  Requires-Dist: redis ~=4.3 ; extra == 'complete'
129
- Requires-Dist: s3fs ==2023.9.2 ; extra == 'complete'
129
+ Requires-Dist: s3fs <2024.4,>=2023.9.2 ; extra == 'complete'
130
130
  Requires-Dist: sqlalchemy ~=1.4 ; extra == 'complete'
131
131
  Provides-Extra: complete-api
132
132
  Requires-Dist: adlfs ==2023.9.0 ; extra == 'complete-api'
@@ -143,11 +143,11 @@ Requires-Dist: dask ~=2023.9.0 ; extra == 'complete-api'
143
143
  Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'complete-api'
144
144
  Requires-Dist: distributed ~=2023.9.0 ; extra == 'complete-api'
145
145
  Requires-Dist: fastapi ~=0.110.0 ; extra == 'complete-api'
146
- Requires-Dist: gcsfs ==2023.9.2 ; extra == 'complete-api'
146
+ Requires-Dist: gcsfs <2024.4,>=2023.9.2 ; extra == 'complete-api'
147
147
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'complete-api'
148
148
  Requires-Dist: graphviz ~=0.20.0 ; extra == 'complete-api'
149
149
  Requires-Dist: humanfriendly ~=10.0 ; extra == 'complete-api'
150
- Requires-Dist: igz-mgmt ~=0.1.1 ; extra == 'complete-api'
150
+ Requires-Dist: igz-mgmt ~=0.1.3 ; extra == 'complete-api'
151
151
  Requires-Dist: kafka-python ~=2.0 ; extra == 'complete-api'
152
152
  Requires-Dist: mlflow ~=2.8 ; extra == 'complete-api'
153
153
  Requires-Dist: msrest ~=0.6.21 ; extra == 'complete-api'
@@ -158,7 +158,7 @@ Requires-Dist: plotly <5.12.0,~=5.4 ; extra == 'complete-api'
158
158
  Requires-Dist: pymysql ~=1.0 ; extra == 'complete-api'
159
159
  Requires-Dist: pyopenssl >=23 ; extra == 'complete-api'
160
160
  Requires-Dist: redis ~=4.3 ; extra == 'complete-api'
161
- Requires-Dist: s3fs ==2023.9.2 ; extra == 'complete-api'
161
+ Requires-Dist: s3fs <2024.4,>=2023.9.2 ; extra == 'complete-api'
162
162
  Requires-Dist: sqlalchemy ~=1.4 ; extra == 'complete-api'
163
163
  Requires-Dist: timelength ~=1.1 ; extra == 'complete-api'
164
164
  Requires-Dist: uvicorn ~=0.27.1 ; extra == 'complete-api'
@@ -174,7 +174,7 @@ Requires-Dist: google-cloud ==0.34 ; extra == 'google-cloud'
174
174
  Provides-Extra: google-cloud-bigquery
175
175
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'google-cloud-bigquery'
176
176
  Provides-Extra: google-cloud-storage
177
- Requires-Dist: gcsfs ==2023.9.2 ; extra == 'google-cloud-storage'
177
+ Requires-Dist: gcsfs <2024.4,>=2023.9.2 ; extra == 'google-cloud-storage'
178
178
  Provides-Extra: graphviz
179
179
  Requires-Dist: graphviz ~=0.20.0 ; extra == 'graphviz'
180
180
  Provides-Extra: kafka
@@ -189,7 +189,7 @@ Requires-Dist: redis ~=4.3 ; extra == 'redis'
189
189
  Provides-Extra: s3
190
190
  Requires-Dist: boto3 <1.29.0,>=1.28.0 ; extra == 's3'
191
191
  Requires-Dist: aiobotocore <2.8,>=2.5.0 ; extra == 's3'
192
- Requires-Dist: s3fs ==2023.9.2 ; extra == 's3'
192
+ Requires-Dist: s3fs <2024.4,>=2023.9.2 ; extra == 's3'
193
193
  Provides-Extra: sqlalchemy
194
194
  Requires-Dist: sqlalchemy ~=1.4 ; extra == 'sqlalchemy'
195
195