mlrun 1.6.0rc26__py3-none-any.whl → 1.6.3rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic; see the registry's advisory page for more details.

Files changed (66):
  1. mlrun/artifacts/manager.py +6 -0
  2. mlrun/artifacts/model.py +28 -22
  3. mlrun/common/db/sql_session.py +3 -0
  4. mlrun/common/model_monitoring/helpers.py +4 -2
  5. mlrun/common/schemas/__init__.py +2 -0
  6. mlrun/common/schemas/common.py +40 -0
  7. mlrun/common/schemas/model_monitoring/__init__.py +1 -0
  8. mlrun/common/schemas/model_monitoring/constants.py +21 -5
  9. mlrun/common/schemas/project.py +2 -0
  10. mlrun/config.py +43 -17
  11. mlrun/data_types/data_types.py +4 -0
  12. mlrun/datastore/azure_blob.py +9 -9
  13. mlrun/datastore/base.py +22 -44
  14. mlrun/datastore/datastore.py +7 -3
  15. mlrun/datastore/datastore_profile.py +15 -3
  16. mlrun/datastore/google_cloud_storage.py +7 -7
  17. mlrun/datastore/sources.py +17 -4
  18. mlrun/datastore/targets.py +3 -1
  19. mlrun/datastore/utils.py +11 -1
  20. mlrun/datastore/v3io.py +70 -46
  21. mlrun/db/base.py +18 -0
  22. mlrun/db/httpdb.py +41 -36
  23. mlrun/execution.py +3 -3
  24. mlrun/feature_store/api.py +133 -132
  25. mlrun/feature_store/feature_set.py +89 -0
  26. mlrun/feature_store/feature_vector.py +120 -0
  27. mlrun/frameworks/tf_keras/callbacks/logging_callback.py +3 -3
  28. mlrun/frameworks/tf_keras/model_handler.py +7 -7
  29. mlrun/k8s_utils.py +56 -0
  30. mlrun/kfpops.py +19 -10
  31. mlrun/model.py +6 -0
  32. mlrun/model_monitoring/api.py +8 -8
  33. mlrun/model_monitoring/batch.py +1 -1
  34. mlrun/model_monitoring/controller.py +0 -7
  35. mlrun/model_monitoring/stores/kv_model_endpoint_store.py +13 -13
  36. mlrun/model_monitoring/stores/sql_model_endpoint_store.py +0 -1
  37. mlrun/model_monitoring/stream_processing.py +52 -38
  38. mlrun/package/packagers/pandas_packagers.py +3 -3
  39. mlrun/package/utils/_archiver.py +3 -1
  40. mlrun/platforms/iguazio.py +6 -65
  41. mlrun/projects/pipelines.py +29 -12
  42. mlrun/projects/project.py +100 -61
  43. mlrun/run.py +2 -0
  44. mlrun/runtimes/base.py +24 -1
  45. mlrun/runtimes/function.py +14 -15
  46. mlrun/runtimes/kubejob.py +5 -3
  47. mlrun/runtimes/local.py +2 -2
  48. mlrun/runtimes/mpijob/abstract.py +6 -6
  49. mlrun/runtimes/pod.py +3 -3
  50. mlrun/runtimes/serving.py +7 -14
  51. mlrun/runtimes/sparkjob/spark3job.py +3 -3
  52. mlrun/serving/remote.py +4 -2
  53. mlrun/serving/routers.py +14 -8
  54. mlrun/utils/async_http.py +3 -3
  55. mlrun/utils/helpers.py +59 -3
  56. mlrun/utils/http.py +3 -3
  57. mlrun/utils/logger.py +2 -2
  58. mlrun/utils/notifications/notification_pusher.py +6 -6
  59. mlrun/utils/regex.py +5 -1
  60. mlrun/utils/version/version.json +2 -2
  61. {mlrun-1.6.0rc26.dist-info → mlrun-1.6.3rc1.dist-info}/METADATA +21 -23
  62. {mlrun-1.6.0rc26.dist-info → mlrun-1.6.3rc1.dist-info}/RECORD +66 -65
  63. {mlrun-1.6.0rc26.dist-info → mlrun-1.6.3rc1.dist-info}/WHEEL +1 -1
  64. {mlrun-1.6.0rc26.dist-info → mlrun-1.6.3rc1.dist-info}/LICENSE +0 -0
  65. {mlrun-1.6.0rc26.dist-info → mlrun-1.6.3rc1.dist-info}/entry_points.txt +0 -0
  66. {mlrun-1.6.0rc26.dist-info → mlrun-1.6.3rc1.dist-info}/top_level.txt +0 -0
mlrun/serving/remote.py CHANGED
@@ -21,6 +21,7 @@ import storey
21
21
  from storey.flow import _ConcurrentJobExecution
22
22
 
23
23
  import mlrun
24
+ import mlrun.config
24
25
  from mlrun.errors import err_to_str
25
26
  from mlrun.utils import logger
26
27
 
@@ -173,7 +174,8 @@ class RemoteStep(storey.SendToHttp):
173
174
  if not self._session:
174
175
  self._session = mlrun.utils.HTTPSessionWithRetry(
175
176
  self.retries,
176
- self.backoff_factor or mlrun.mlconf.http_retry_defaults.backoff_factor,
177
+ self.backoff_factor
178
+ or mlrun.config.config.http_retry_defaults.backoff_factor,
177
179
  retry_on_exception=False,
178
180
  retry_on_status=self.retries > 0,
179
181
  retry_on_post=True,
@@ -185,7 +187,7 @@ class RemoteStep(storey.SendToHttp):
185
187
  resp = self._session.request(
186
188
  method,
187
189
  url,
188
- verify=False,
190
+ verify=mlrun.config.config.httpdb.http.verify,
189
191
  headers=headers,
190
192
  data=body,
191
193
  timeout=self.timeout,
mlrun/serving/routers.py CHANGED
@@ -1111,7 +1111,7 @@ class EnrichmentModelRouter(ModelRouter):
1111
1111
  url_prefix: str = None,
1112
1112
  health_prefix: str = None,
1113
1113
  feature_vector_uri: str = "",
1114
- impute_policy: dict = {},
1114
+ impute_policy: dict = None,
1115
1115
  **kwargs,
1116
1116
  ):
1117
1117
  """Model router with feature enrichment (from the feature store)
@@ -1156,14 +1156,17 @@ class EnrichmentModelRouter(ModelRouter):
1156
1156
  )
1157
1157
 
1158
1158
  self.feature_vector_uri = feature_vector_uri
1159
- self.impute_policy = impute_policy
1159
+ self.impute_policy = impute_policy or {}
1160
1160
 
1161
1161
  self._feature_service = None
1162
1162
 
1163
1163
  def post_init(self, mode="sync"):
1164
+ from ..feature_store import get_feature_vector
1165
+
1164
1166
  super().post_init(mode)
1165
- self._feature_service = mlrun.feature_store.get_online_feature_service(
1166
- feature_vector=self.feature_vector_uri,
1167
+ self._feature_service = get_feature_vector(
1168
+ self.feature_vector_uri
1169
+ ).get_online_feature_service(
1167
1170
  impute_policy=self.impute_policy,
1168
1171
  )
1169
1172
 
@@ -1192,7 +1195,7 @@ class EnrichmentVotingEnsemble(VotingEnsemble):
1192
1195
  executor_type: Union[ParallelRunnerModes, str] = ParallelRunnerModes.thread,
1193
1196
  prediction_col_name: str = None,
1194
1197
  feature_vector_uri: str = "",
1195
- impute_policy: dict = {},
1198
+ impute_policy: dict = None,
1196
1199
  **kwargs,
1197
1200
  ):
1198
1201
  """Voting Ensemble with feature enrichment (from the feature store)
@@ -1299,14 +1302,17 @@ class EnrichmentVotingEnsemble(VotingEnsemble):
1299
1302
  )
1300
1303
 
1301
1304
  self.feature_vector_uri = feature_vector_uri
1302
- self.impute_policy = impute_policy
1305
+ self.impute_policy = impute_policy or {}
1303
1306
 
1304
1307
  self._feature_service = None
1305
1308
 
1306
1309
  def post_init(self, mode="sync"):
1310
+ from ..feature_store import get_feature_vector
1311
+
1307
1312
  super().post_init(mode)
1308
- self._feature_service = mlrun.feature_store.get_online_feature_service(
1309
- feature_vector=self.feature_vector_uri,
1313
+ self._feature_service = get_feature_vector(
1314
+ self.feature_vector_uri
1315
+ ).get_online_feature_service(
1310
1316
  impute_policy=self.impute_policy,
1311
1317
  )
1312
1318
 
mlrun/utils/async_http.py CHANGED
@@ -139,9 +139,9 @@ class _CustomRequestContext(_RequestContext):
139
139
 
140
140
  # enrich user agent
141
141
  # will help traceability and debugging
142
- headers[
143
- aiohttp.hdrs.USER_AGENT
144
- ] = f"{aiohttp.http.SERVER_SOFTWARE} mlrun/{config.version}"
142
+ headers[aiohttp.hdrs.USER_AGENT] = (
143
+ f"{aiohttp.http.SERVER_SOFTWARE} mlrun/{config.version}"
144
+ )
145
145
 
146
146
  response: typing.Optional[
147
147
  aiohttp.ClientResponse
mlrun/utils/helpers.py CHANGED
@@ -176,6 +176,8 @@ def verify_field_regex(
176
176
  log_message: str = "Field is malformed. Does not match required pattern",
177
177
  mode: mlrun.common.schemas.RegexMatchModes = mlrun.common.schemas.RegexMatchModes.all,
178
178
  ) -> bool:
179
+ # limit the error message
180
+ max_chars = 63
179
181
  for pattern in patterns:
180
182
  if not re.match(pattern, str(field_value)):
181
183
  log_func = logger.warn if raise_on_failure else logger.debug
@@ -188,7 +190,8 @@ def verify_field_regex(
188
190
  if mode == mlrun.common.schemas.RegexMatchModes.all:
189
191
  if raise_on_failure:
190
192
  raise mlrun.errors.MLRunInvalidArgumentError(
191
- f"Field '{field_name}' is malformed. '{field_value}' does not match required pattern: {pattern}"
193
+ f"Field '{field_name[:max_chars]}' is malformed. '{field_value[:max_chars]}' "
194
+ f"does not match required pattern: {pattern}"
192
195
  )
193
196
  return False
194
197
  elif mode == mlrun.common.schemas.RegexMatchModes.any:
@@ -198,7 +201,7 @@ def verify_field_regex(
198
201
  elif mode == mlrun.common.schemas.RegexMatchModes.any:
199
202
  if raise_on_failure:
200
203
  raise mlrun.errors.MLRunInvalidArgumentError(
201
- f"Field '{field_name}' is malformed. '{field_value}' does not match any of the"
204
+ f"Field '{field_name[:max_chars]}' is malformed. '{field_value[:max_chars]}' does not match any of the"
202
205
  f" required patterns: {patterns}"
203
206
  )
204
207
  return False
@@ -1472,6 +1475,18 @@ def as_number(field_name, field_value):
1472
1475
 
1473
1476
 
1474
1477
  def filter_warnings(action, category):
1478
+ """
1479
+ Decorator to filter warnings
1480
+
1481
+ Example::
1482
+ @filter_warnings("ignore", FutureWarning)
1483
+ def my_function():
1484
+ pass
1485
+
1486
+ :param action: one of "error", "ignore", "always", "default", "module", or "once"
1487
+ :param category: a class that the warning must be a subclass of
1488
+ """
1489
+
1475
1490
  def decorator(function):
1476
1491
  def wrapper(*args, **kwargs):
1477
1492
  # context manager that copies and, upon exit, restores the warnings filter and the showwarning() function.
@@ -1525,6 +1540,24 @@ def normalize_workflow_name(name, project_name):
1525
1540
  return name.removeprefix(project_name + "-")
1526
1541
 
1527
1542
 
1543
+ def normalize_project_username(username: str):
1544
+ username = username.lower()
1545
+
1546
+ # remove domain if exists
1547
+ username = username.split("@")[0]
1548
+
1549
+ # replace non r'a-z0-9\-_' chars with empty string
1550
+ username = inflection.parameterize(username, separator="")
1551
+
1552
+ # replace underscore with dashes
1553
+ username = inflection.dasherize(username)
1554
+
1555
+ # ensure ends with alphanumeric
1556
+ username = username.rstrip("-_")
1557
+
1558
+ return username
1559
+
1560
+
1528
1561
  # run_in threadpool is taken from fastapi to allow us to run sync functions in a threadpool
1529
1562
  # without importing fastapi in the client
1530
1563
  async def run_in_threadpool(func, *args, **kwargs):
@@ -1571,10 +1604,25 @@ def iterate_list_by_chunks(
1571
1604
 
1572
1605
 
1573
1606
  def to_parquet(df, *args, **kwargs):
1607
+ import pyarrow.lib
1608
+
1574
1609
  # version set for pyspark compatibility, and is needed as of pyarrow 13 due to timestamp incompatibility
1575
1610
  if "version" not in kwargs:
1576
1611
  kwargs["version"] = "2.4"
1577
- df.to_parquet(*args, **kwargs)
1612
+ try:
1613
+ df.to_parquet(*args, **kwargs)
1614
+ except pyarrow.lib.ArrowInvalid as ex:
1615
+ if re.match(
1616
+ "Fragment would be written into [0-9]+. partitions. This exceeds the maximum of [0-9]+",
1617
+ str(ex),
1618
+ ):
1619
+ raise mlrun.errors.MLRunRuntimeError(
1620
+ """Maximum number of partitions exceeded. To resolve this, change
1621
+ partition granularity by setting time_partitioning_granularity or partition_cols, or disable partitioning altogether by
1622
+ setting partitioned=False"""
1623
+ ) from ex
1624
+ else:
1625
+ raise ex
1578
1626
 
1579
1627
 
1580
1628
  def is_ecr_url(registry: str) -> bool:
@@ -1586,3 +1634,11 @@ def get_local_file_schema() -> List:
1586
1634
  # The expression `list(string.ascii_lowercase)` generates a list of lowercase alphabets,
1587
1635
  # which corresponds to drive letters in Windows file paths such as `C:/Windows/path`.
1588
1636
  return ["file"] + list(string.ascii_lowercase)
1637
+
1638
+
1639
+ def is_safe_path(base, filepath, is_symlink=False):
1640
+ # Avoid path traversal attacks by ensuring that the path is safe
1641
+ resolved_filepath = (
1642
+ os.path.abspath(filepath) if not is_symlink else os.path.realpath(filepath)
1643
+ )
1644
+ return base == os.path.commonpath((base, resolved_filepath))
mlrun/utils/http.py CHANGED
@@ -110,9 +110,9 @@ class HTTPSessionWithRetry(requests.Session):
110
110
  def request(self, method, url, **kwargs):
111
111
  retry_count = 0
112
112
  kwargs.setdefault("headers", {})
113
- kwargs["headers"][
114
- "User-Agent"
115
- ] = f"{requests.utils.default_user_agent()} mlrun/{config.version}"
113
+ kwargs["headers"]["User-Agent"] = (
114
+ f"{requests.utils.default_user_agent()} mlrun/{config.version}"
115
+ )
116
116
  while True:
117
117
  try:
118
118
  response = super().request(method, url, **kwargs)
mlrun/utils/logger.py CHANGED
@@ -186,7 +186,7 @@ class FormatterKinds(Enum):
186
186
  JSON = "json"
187
187
 
188
188
 
189
- def _create_formatter_instance(formatter_kind: FormatterKinds) -> logging.Formatter:
189
+ def create_formatter_instance(formatter_kind: FormatterKinds) -> logging.Formatter:
190
190
  return {
191
191
  FormatterKinds.HUMAN: HumanReadableFormatter(),
192
192
  FormatterKinds.HUMAN_EXTENDED: HumanReadableExtendedFormatter(),
@@ -208,7 +208,7 @@ def create_logger(
208
208
  logger_instance = Logger(level, name=name, propagate=False)
209
209
 
210
210
  # resolve formatter
211
- formatter_instance = _create_formatter_instance(
211
+ formatter_instance = create_formatter_instance(
212
212
  FormatterKinds(formatter_kind.lower())
213
213
  )
214
214
 
@@ -307,9 +307,9 @@ class NotificationPusher(_NotificationPusherBase):
307
307
  traceback=traceback.format_exc(),
308
308
  )
309
309
  update_notification_status_kwargs["reason"] = f"Exception error: {str(exc)}"
310
- update_notification_status_kwargs[
311
- "status"
312
- ] = mlrun.common.schemas.NotificationStatus.ERROR
310
+ update_notification_status_kwargs["status"] = (
311
+ mlrun.common.schemas.NotificationStatus.ERROR
312
+ )
313
313
  raise exc
314
314
  finally:
315
315
  self._update_notification_status(
@@ -356,9 +356,9 @@ class NotificationPusher(_NotificationPusherBase):
356
356
  traceback=traceback.format_exc(),
357
357
  )
358
358
  update_notification_status_kwargs["reason"] = f"Exception error: {str(exc)}"
359
- update_notification_status_kwargs[
360
- "status"
361
- ] = mlrun.common.schemas.NotificationStatus.ERROR
359
+ update_notification_status_kwargs["status"] = (
360
+ mlrun.common.schemas.NotificationStatus.ERROR
361
+ )
362
362
  raise exc
363
363
  finally:
364
364
  await mlrun.utils.helpers.run_in_threadpool(
mlrun/utils/regex.py CHANGED
@@ -21,9 +21,13 @@ pipeline_param = [r"{{pipelineparam:op=([\w\s_-]*);name=([\w\s_-]+)}}"]
21
21
  # k8s character limit is for 63 characters
22
22
  k8s_character_limit = [r"^.{0,63}$"]
23
23
 
24
+ # k8s name
25
+ # https://github.com/kubernetes/apimachinery/blob/kubernetes-1.25.16/pkg/util/validation/validation.go#L33
26
+ qualified_name = [r"^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$"]
27
+
24
28
  # k8s label value format
25
29
  # https://github.com/kubernetes/kubernetes/blob/v1.20.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L161
26
- label_value = k8s_character_limit + [r"^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$"]
30
+ label_value = k8s_character_limit + qualified_name
27
31
 
28
32
  # DNS Subdomain (RFC 1123) - used by k8s for most resource names format
29
33
  # https://github.com/kubernetes/kubernetes/blob/v1.20.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L204
@@ -1,4 +1,4 @@
1
1
  {
2
- "git_commit": "884eec49eb4123b069e7e9e448169b179facaa8b",
3
- "version": "1.6.0-rc26"
2
+ "git_commit": "e48bcf64d5c6c36239e481620674182e88911c68",
3
+ "version": "1.6.3-rc1"
4
4
  }
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mlrun
3
- Version: 1.6.0rc26
3
+ Version: 1.6.3rc1
4
4
  Summary: Tracking and config of machine learning runs
5
5
  Home-page: https://github.com/mlrun/mlrun
6
6
  Author: Yaron Haviv
@@ -36,7 +36,7 @@ Requires-Dist: pyarrow <15,>=10.0
36
36
  Requires-Dist: pyyaml ~=5.1
37
37
  Requires-Dist: requests ~=2.31
38
38
  Requires-Dist: tabulate ~=0.8.6
39
- Requires-Dist: v3io ~=0.5.21
39
+ Requires-Dist: v3io ~=0.6.4
40
40
  Requires-Dist: pydantic >=1.10.8,~=1.10
41
41
  Requires-Dist: mergedeep ~=1.3
42
42
  Requires-Dist: v3io-frames ~=0.10.12
@@ -44,10 +44,10 @@ Requires-Dist: semver ~=3.0
44
44
  Requires-Dist: dependency-injector ~=4.41
45
45
  Requires-Dist: fsspec ==2023.9.2
46
46
  Requires-Dist: v3iofs ~=0.1.17
47
- Requires-Dist: storey ~=1.6.18
47
+ Requires-Dist: storey ~=1.6.20
48
48
  Requires-Dist: inflection ~=0.5.0
49
49
  Requires-Dist: python-dotenv ~=0.17.0
50
- Requires-Dist: setuptools ~=68.2
50
+ Requires-Dist: setuptools ~=69.1
51
51
  Requires-Dist: deprecated ~=1.2
52
52
  Requires-Dist: jinja2 >=3.1.3,~=3.1
53
53
  Requires-Dist: anyio ~=3.7
@@ -78,14 +78,13 @@ Requires-Dist: redis ~=4.3 ; extra == 'all'
78
78
  Requires-Dist: s3fs ==2023.9.2 ; extra == 'all'
79
79
  Requires-Dist: sqlalchemy ~=1.4 ; extra == 'all'
80
80
  Provides-Extra: api
81
- Requires-Dist: uvicorn ~=0.23.2 ; extra == 'api'
81
+ Requires-Dist: uvicorn ~=0.27.1 ; extra == 'api'
82
82
  Requires-Dist: dask-kubernetes ~=0.11.0 ; extra == 'api'
83
- Requires-Dist: apscheduler !=3.10.2,~=3.6 ; extra == 'api'
84
- Requires-Dist: sqlite3-to-mysql ~=1.4 ; extra == 'api'
85
- Requires-Dist: objgraph ~=3.5 ; extra == 'api'
86
- Requires-Dist: igz-mgmt ~=0.0.10 ; extra == 'api'
87
- Requires-Dist: humanfriendly ~=9.2 ; extra == 'api'
88
- Requires-Dist: fastapi ~=0.103.2 ; extra == 'api'
83
+ Requires-Dist: apscheduler <4,>=3.10.3 ; extra == 'api'
84
+ Requires-Dist: objgraph ~=3.6 ; extra == 'api'
85
+ Requires-Dist: igz-mgmt ~=0.1.0 ; extra == 'api'
86
+ Requires-Dist: humanfriendly ~=10.0 ; extra == 'api'
87
+ Requires-Dist: fastapi ~=0.110.0 ; extra == 'api'
89
88
  Requires-Dist: sqlalchemy ~=1.4 ; extra == 'api'
90
89
  Requires-Dist: pymysql ~=1.0 ; extra == 'api'
91
90
  Requires-Dist: alembic ~=1.9 ; extra == 'api'
@@ -127,7 +126,7 @@ Provides-Extra: complete-api
127
126
  Requires-Dist: adlfs ==2023.9.0 ; extra == 'complete-api'
128
127
  Requires-Dist: aiobotocore <2.8,>=2.5.0 ; extra == 'complete-api'
129
128
  Requires-Dist: alembic ~=1.9 ; extra == 'complete-api'
130
- Requires-Dist: apscheduler !=3.10.2,~=3.6 ; extra == 'complete-api'
129
+ Requires-Dist: apscheduler <4,>=3.10.3 ; extra == 'complete-api'
131
130
  Requires-Dist: avro ~=1.11 ; extra == 'complete-api'
132
131
  Requires-Dist: azure-core ~=1.24 ; extra == 'complete-api'
133
132
  Requires-Dist: azure-identity ~=1.5 ; extra == 'complete-api'
@@ -137,25 +136,24 @@ Requires-Dist: dask-kubernetes ~=0.11.0 ; extra == 'complete-api'
137
136
  Requires-Dist: dask ~=2023.9.0 ; extra == 'complete-api'
138
137
  Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'complete-api'
139
138
  Requires-Dist: distributed ~=2023.9.0 ; extra == 'complete-api'
140
- Requires-Dist: fastapi ~=0.103.2 ; extra == 'complete-api'
139
+ Requires-Dist: fastapi ~=0.110.0 ; extra == 'complete-api'
141
140
  Requires-Dist: gcsfs ==2023.9.2 ; extra == 'complete-api'
142
141
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'complete-api'
143
142
  Requires-Dist: graphviz ~=0.20.0 ; extra == 'complete-api'
144
- Requires-Dist: humanfriendly ~=9.2 ; extra == 'complete-api'
145
- Requires-Dist: igz-mgmt ~=0.0.10 ; extra == 'complete-api'
143
+ Requires-Dist: humanfriendly ~=10.0 ; extra == 'complete-api'
144
+ Requires-Dist: igz-mgmt ~=0.1.0 ; extra == 'complete-api'
146
145
  Requires-Dist: kafka-python ~=2.0 ; extra == 'complete-api'
147
146
  Requires-Dist: mlflow ~=2.8 ; extra == 'complete-api'
148
147
  Requires-Dist: msrest ~=0.6.21 ; extra == 'complete-api'
149
- Requires-Dist: objgraph ~=3.5 ; extra == 'complete-api'
148
+ Requires-Dist: objgraph ~=3.6 ; extra == 'complete-api'
150
149
  Requires-Dist: plotly <5.12.0,~=5.4 ; extra == 'complete-api'
151
150
  Requires-Dist: pymysql ~=1.0 ; extra == 'complete-api'
152
151
  Requires-Dist: pyopenssl >=23 ; extra == 'complete-api'
153
152
  Requires-Dist: redis ~=4.3 ; extra == 'complete-api'
154
153
  Requires-Dist: s3fs ==2023.9.2 ; extra == 'complete-api'
155
154
  Requires-Dist: sqlalchemy ~=1.4 ; extra == 'complete-api'
156
- Requires-Dist: sqlite3-to-mysql ~=1.4 ; extra == 'complete-api'
157
155
  Requires-Dist: timelength ~=1.1 ; extra == 'complete-api'
158
- Requires-Dist: uvicorn ~=0.23.2 ; extra == 'complete-api'
156
+ Requires-Dist: uvicorn ~=0.27.1 ; extra == 'complete-api'
159
157
  Provides-Extra: dask
160
158
  Requires-Dist: dask ~=2023.9.0 ; extra == 'dask'
161
159
  Requires-Dist: distributed ~=2023.9.0 ; extra == 'dask'
@@ -222,7 +220,7 @@ In MLRun the assets, metadata, and services (data, functions, jobs, artifacts, m
222
220
  Projects can be imported/exported as a whole, mapped to git repositories or IDE projects (in PyCharm, VSCode, etc.), which enables versioning, collaboration, and CI/CD.
223
221
  Project access can be restricted to a set of users and roles.
224
222
 
225
- See: **Docs:** [Projects and Automation](https://docs.mlrun.org/en/latest/projects/project.html), [CI/CD Integration](https://docs.mlrun.org/en/latest/projects/ci-integration.html), **Tutorials:** [Quick start](https://docs.mlrun.org/en/latest/tutorials/01-mlrun-basics.html), [Automated ML Pipeline](https://docs.mlrun.org/en/latest/tutorials/04-pipeline.html), **Video:** [quick start](https://youtu.be/xI8KVGLlj7Q).
223
+ See: **Docs:** [Projects and Automation](https://docs.mlrun.org/en/latest/projects/project.html), [CI/CD Integration](https://docs.mlrun.org/en/latest/projects/ci-integration.html), **Tutorials:** [Quick start](https://docs.mlrun.org/en/latest/tutorials/01-mlrun-basics.html), [Automated ML Pipeline](https://docs.mlrun.org/en/latest/tutorials/04-pipeline.html), **Video:** [Quick start](https://youtu.be/xI8KVGLlj7Q).
226
224
 
227
225
  ### Ingest and process data
228
226
 
@@ -235,13 +233,13 @@ See: **Docs:** [Ingest and process data](https://docs.mlrun.org/en/latest/data-p
235
233
 
236
234
  MLRun allows you to easily build ML pipelines that take data from various sources or the Feature Store and process it, train models at scale with multiple parameters, test models, tracks each experiments, register, version and deploy models, etc. MLRun provides scalable built-in or custom model training services, integrate with any framework and can work with 3rd party training/auto-ML services. You can also bring your own pre-trained model and use it in the pipeline.
237
235
 
238
- See: **Docs:** [Develop and train models](https://docs.mlrun.org/en/latest/development/index.html), [Model Training and Tracking](https://docs.mlrun.org/en/latest/development/model-training-tracking.html), [Batch Runs and Workflows](https://docs.mlrun.org/en/latest/concepts/runs-workflows.html); **Tutorials:** [Train & Eval Models](https://docs.mlrun.org/en/latest/tutorials/02-model-training.html), [Automated ML Pipeline](https://docs.mlrun.org/en/latest/tutorials/04-pipeline.html); **Video:** [Training models](https://youtu.be/bZgBsmLMdQo).
236
+ See: **Docs:** [Develop and train models](https://docs.mlrun.org/en/latest/development/index.html), [Model Training and Tracking](https://docs.mlrun.org/en/latest/development/model-training-tracking.html), [Batch Runs and Workflows](https://docs.mlrun.org/en/latest/concepts/runs-workflows.html); **Tutorials:** [Train, compare, and register models](https://docs.mlrun.org/en/latest/tutorials/02-model-training.html), [Automated ML Pipeline](https://docs.mlrun.org/en/latest/tutorials/04-pipeline.html); **Video:** [Train and compare models](https://youtu.be/bZgBsmLMdQo).
239
237
 
240
238
  ### Deploy models and applications
241
239
 
242
240
  MLRun rapidly deploys and manages production-grade real-time or batch application pipelines using elastic and resilient serverless functions. MLRun addresses the entire ML application: intercepting application/user requests, running data processing tasks, inferencing using one or more models, driving actions, and integrating with the application logic.
243
241
 
244
- See: **Docs:** [Deploy models and applications](https://docs.mlrun.org/en/latest/deployment/index.html), [Realtime Pipelines](https://docs.mlrun.org/en/latest/serving/serving-graph.html), [Batch Inference](https://docs.mlrun.org/en/latest/concepts/TBD.html), **Tutorials:** [Realtime Serving](https://docs.mlrun.org/en/latest/tutorials/03-model-serving.html), [Batch Inference](https://docs.mlrun.org/en/latest/tutorials/07-batch-infer.html), [Advanced Pipeline](https://docs.mlrun.org/en/latest/tutorials/07-batch-infer.html); **Video:** [Serving models](https://youtu.be/OUjOus4dZfw).
242
+ See: **Docs:** [Deploy models and applications](https://docs.mlrun.org/en/latest/deployment/index.html), [Realtime Pipelines](https://docs.mlrun.org/en/latest/serving/serving-graph.html), [Batch Inference](https://docs.mlrun.org/en/latest/deployment/batch_inference.html), **Tutorials:** [Realtime Serving](https://docs.mlrun.org/en/latest/tutorials/03-model-serving.html), [Batch Inference](https://docs.mlrun.org/en/latest/tutorials/07-batch-infer.html), [Advanced Pipeline](https://docs.mlrun.org/en/latest/tutorials/07-batch-infer.html); **Video:** [Serving pre-trained models](https://youtu.be/OUjOus4dZfw).
245
243
 
246
244
  ### Monitor and alert
247
245
 
@@ -259,9 +257,9 @@ MLRun includes the following major components:
259
257
 
260
258
  [**Project Management:**](https://docs.mlrun.org/en/latest/projects/project.html) A service (API, SDK, DB, UI) that manages the different project assets (data, functions, jobs, workflows, secrets, etc.) and provides central control and metadata layer.
261
259
 
262
- [**Serverless Functions:**](https://docs.mlrun.org/en/latest/runtimes/functions.html) automatically deployed software package with one or more methods and runtime-specific attributes (such as image, libraries, command, arguments, resources, etc.).
260
+ [**Functions:**](https://docs.mlrun.org/en/latest/runtimes/functions.html) automatically deployed software package with one or more methods and runtime-specific attributes (such as image, libraries, command, arguments, resources, etc.).
263
261
 
264
- [**Data & Artifacts:**](https://docs.mlrun.org/en/latest/concepts/data-feature-store.html) Glueless connectivity to various data sources, metadata management, catalog, and versioning for structures/unstructured artifacts.
262
+ [**Data & Artifacts:**](https://docs.mlrun.org/en/latest/concepts/data.html) Glueless connectivity to various data sources, metadata management, catalog, and versioning for structures/unstructured artifacts.
265
263
 
266
264
  [**Feature Store:**](https://docs.mlrun.org/en/latest/feature-store/feature-store.html) automatically collects, prepares, catalogs, and serves production data features for development (offline) and real-time (online) deployment using minimal engineering effort.
267
265