mlrun 1.7.0rc43__py3-none-any.whl → 1.7.0rc45__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mlrun might be problematic.
- mlrun/__main__.py +4 -2
- mlrun/artifacts/manager.py +3 -1
- mlrun/common/schemas/alert.py +11 -11
- mlrun/common/schemas/auth.py +2 -0
- mlrun/common/schemas/client_spec.py +0 -1
- mlrun/common/schemas/workflow.py +1 -0
- mlrun/config.py +28 -21
- mlrun/data_types/data_types.py +5 -0
- mlrun/datastore/base.py +4 -4
- mlrun/datastore/storeytargets.py +2 -2
- mlrun/db/httpdb.py +10 -12
- mlrun/db/nopdb.py +21 -4
- mlrun/execution.py +3 -1
- mlrun/feature_store/api.py +1 -0
- mlrun/feature_store/retrieval/spark_merger.py +7 -3
- mlrun/frameworks/_common/plan.py +3 -3
- mlrun/frameworks/_ml_common/plan.py +1 -1
- mlrun/frameworks/parallel_coordinates.py +2 -3
- mlrun/launcher/client.py +6 -6
- mlrun/model_monitoring/applications/results.py +2 -2
- mlrun/model_monitoring/controller.py +1 -1
- mlrun/model_monitoring/db/stores/sqldb/sql_store.py +15 -1
- mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +12 -0
- mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +2 -2
- mlrun/model_monitoring/helpers.py +7 -8
- mlrun/model_monitoring/writer.py +3 -1
- mlrun/projects/pipelines.py +2 -0
- mlrun/projects/project.py +32 -18
- mlrun/render.py +3 -3
- mlrun/runtimes/daskjob.py +1 -1
- mlrun/runtimes/kubejob.py +6 -6
- mlrun/runtimes/nuclio/api_gateway.py +6 -0
- mlrun/runtimes/nuclio/application/application.py +3 -3
- mlrun/runtimes/nuclio/function.py +45 -0
- mlrun/runtimes/pod.py +19 -13
- mlrun/serving/server.py +2 -0
- mlrun/utils/helpers.py +22 -16
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc45.dist-info}/METADATA +18 -18
- {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc45.dist-info}/RECORD +44 -44
- {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc45.dist-info}/WHEEL +1 -1
- {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc45.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc45.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc45.dist-info}/top_level.txt +0 -0
mlrun/model_monitoring/writer.py
CHANGED
@@ -160,7 +160,9 @@ class ModelMonitoringWriter(StepToDict):
             event_kind = f"{event_kind}_detected"
         else:
             event_kind = f"{event_kind}_suspected"
-        return alert_objects.EventKind(
+        return alert_objects.EventKind(
+            value=mlrun.utils.helpers.normalize_name(event_kind)
+        )
 
     @staticmethod
     def _reconstruct_event(event: _RawEvent) -> tuple[_AppResultEvent, WriterEventKind]:
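The writer now normalizes the event kind before constructing the alert event. A minimal sketch of the effect, assuming `alert_objects` is `mlrun.common.schemas.alert` and that `normalize_name` replaces underscores with dashes to match the enum's dashed values:

    import mlrun.common.schemas.alert as alert_objects
    import mlrun.utils.helpers

    event_kind = "data_drift_detected"
    # "data_drift_detected" -> "data-drift-detected"
    event = alert_objects.EventKind(
        value=mlrun.utils.helpers.normalize_name(event_kind)
    )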
mlrun/projects/pipelines.py
CHANGED
@@ -80,6 +80,7 @@ class WorkflowSpec(mlrun.model.ModelObj):
         schedule: typing.Union[str, mlrun.common.schemas.ScheduleCronTrigger] = None,
         cleanup_ttl: typing.Optional[int] = None,
         image: typing.Optional[str] = None,
+        workflow_runner_node_selector: typing.Optional[dict[str, str]] = None,
     ):
         self.engine = engine
         self.code = code
@@ -93,6 +94,7 @@ class WorkflowSpec(mlrun.model.ModelObj):
         self._tmp_path = None
         self.schedule = schedule
         self.image = image
+        self.workflow_runner_node_selector = workflow_runner_node_selector
 
     def get_source_file(self, context=""):
         if not self.code and not self.path:
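A hedged sketch of populating the new field directly on the spec; the engine, image, and label values here are hypothetical:

    from mlrun.projects.pipelines import WorkflowSpec

    spec = WorkflowSpec(
        engine="remote",
        image="mlrun/mlrun",
        # pin the workflow runner pod to nodes carrying this label
        workflow_runner_node_selector={"disktype": "ssd"},
    )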
mlrun/projects/project.py
CHANGED
@@ -67,13 +67,7 @@ from ..features import Feature
 from ..model import EntrypointParam, ImageBuilder, ModelObj
 from ..run import code_to_function, get_object, import_function, new_function
 from ..secrets import SecretsStore
-from ..utils import (
-    is_ipython,
-    is_relative_path,
-    is_yaml_path,
-    logger,
-    update_in,
-)
+from ..utils import is_jupyter, is_relative_path, is_yaml_path, logger, update_in
 from ..utils.clones import (
     add_credentials_git_remote_url,
     clone_git,
@@ -1599,7 +1593,9 @@ class MlrunProject(ModelObj):
         :param format: artifact file format: csv, png, ..
         :param tag: version tag
         :param target_path: absolute target path (instead of using artifact_path + local_path)
-        :param upload: upload to datastore
+        :param upload: Whether to upload the artifact to the datastore. If not provided, and the `local_path`
+                       is not a directory, upload occurs by default. Directories are uploaded only when this
+                       flag is explicitly set to `True`.
         :param labels: a set of key/value labels to tag the artifact with
 
         :returns: artifact object
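A short usage sketch of the clarified `upload` semantics, assuming an existing `project` object (artifact names and paths are hypothetical):

    # a single file is uploaded by default when `upload` is not set
    project.log_artifact("my-table", local_path="data.csv")

    # a directory is only uploaded when the flag is set explicitly
    project.log_artifact("my-data-dir", local_path="./data/", upload=True)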
@@ -2439,7 +2435,7 @@ class MlrunProject(ModelObj):
         ):
             # if function path is not provided and it is not a module (no ".")
             # use the current notebook as default
-            if is_ipython:
+            if is_jupyter:
                 from IPython import get_ipython
 
                 kernel = get_ipython()
@@ -2842,11 +2838,13 @@ class MlrunProject(ModelObj):
         The function objects are synced against the definitions spec in `self.spec._function_definitions`.
         Referenced files/URLs in the function spec will be reloaded.
         Function definitions are parsed by the following precedence:
-
-
-
-
-
+
+        1. Contains runtime spec.
+        2. Contains module in the project's context.
+        3. Contains path to function definition (yaml, DB, Hub).
+        4. Contains path to .ipynb or .py files.
+        5. Contains a Nuclio/Serving function image / an 'Application' kind definition.
+
         If function definition is already an object, some project metadata updates will apply however,
         it will not be reloaded.
@@ -3060,6 +3058,7 @@ class MlrunProject(ModelObj):
         source: str = None,
         cleanup_ttl: int = None,
         notifications: list[mlrun.model.Notification] = None,
+        workflow_runner_node_selector: typing.Optional[dict[str, str]] = None,
     ) -> _PipelineRunStatus:
         """Run a workflow using kubeflow pipelines
 
@@ -3088,15 +3087,20 @@ class MlrunProject(ModelObj):
 
             * Remote URL which is loaded dynamically to the workflow runner.
             * A path to the project's context on the workflow runner's image.
-
-
-
+              Path can be absolute or relative to `project.spec.build.source_code_target_dir` if defined
+              (enriched when building a project image with source, see `MlrunProject.build_image`).
+              For other engines the source is used to validate that the code is up-to-date.
+
         :param cleanup_ttl:
                       Pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the
                       workflow and all its resources are deleted)
         :param notifications:
                       List of notifications to send for workflow completion
-
+        :param workflow_runner_node_selector:
+                      Defines the node selector for the workflow runner pod when using a remote engine.
+                      This allows you to control and specify where the workflow runner pod will be scheduled.
+                      This setting is only relevant when the engine is set to 'remote' or for scheduled workflows,
+                      and it will be ignored if the workflow is not run on a remote engine.
         :returns: ~py:class:`~mlrun.projects.pipelines._PipelineRunStatus` instance
         """
 
@@ -3162,6 +3166,16 @@ class MlrunProject(ModelObj):
         )
         inner_engine = get_workflow_engine(engine_kind, local).engine
         workflow_spec.engine = inner_engine or workflow_engine.engine
+        if workflow_runner_node_selector:
+            if workflow_engine.engine == "remote":
+                workflow_spec.workflow_runner_node_selector = (
+                    workflow_runner_node_selector
+                )
+            else:
+                logger.warn(
+                    "'workflow_runner_node_selector' applies only to remote engines"
+                    " and is ignored for non-remote runs."
+                )
 
         run = workflow_engine.run(
             self,
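A hedged end-to-end sketch of the new parameter, assuming a loaded `project` with a workflow named "main" (the node label is hypothetical):

    run_status = project.run(
        name="main",
        engine="remote",  # the selector is honored only by the remote engine
        workflow_runner_node_selector={"node-role.kubernetes.io/worker": "true"},
    )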
mlrun/render.py
CHANGED
@@ -22,7 +22,7 @@ import mlrun.utils
 
 from .config import config
 from .datastore import uri_to_ipython
-from .utils import dict_to_list, get_in, is_ipython
+from .utils import dict_to_list, get_in, is_jupyter
 
 JUPYTER_SERVER_ROOT = environ.get("HOME", "/User")
 supported_viewers = [
@@ -181,8 +181,8 @@ def run_to_html(results, display=True):
 
 
 def ipython_display(html, display=True, alt_text=None):
-    if display and html and is_ipython:
-        import IPython
+    if display and html and is_jupyter:
+        import IPython.display
 
         IPython.display.display(IPython.display.HTML(html))
     elif alt_text:
mlrun/runtimes/daskjob.py
CHANGED
@@ -379,7 +379,7 @@ class DaskCluster(KubejobRuntime):
         :param show_on_failure: show logs only in case of build failure
         :param force_build: force building the image, even when no changes were made
 
-        :return
+        :return: True if the function is ready (deployed)
         """
         return super().deploy(
             watch,
mlrun/runtimes/kubejob.py
CHANGED
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import typing
 import warnings
 
 from mlrun_pipelines.common.ops import build_op
@@ -143,11 +143,11 @@ class KubejobRuntime(KubeResource):
 
     def deploy(
         self,
-        watch=True,
-        with_mlrun=None,
-        skip_deployed=False,
-        is_kfp=False,
-        mlrun_version_specifier=None,
+        watch: bool = True,
+        with_mlrun: typing.Optional[bool] = None,
+        skip_deployed: bool = False,
+        is_kfp: bool = False,
+        mlrun_version_specifier: typing.Optional[bool] = None,
         builder_env: dict = None,
         show_on_failure: bool = False,
         force_build: bool = False,
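The annotations only add types; call sites are unchanged. A minimal sketch, assuming `fn` is a KubejobRuntime:

    fn.deploy(watch=True, skip_deployed=True)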
mlrun/runtimes/nuclio/api_gateway.py
CHANGED
@@ -587,6 +587,12 @@ class APIGateway(ModelObj):
             self.metadata.annotations, gateway_timeout
         )
 
+    def with_annotations(self, annotations: dict):
+        """set a key/value annotations in the metadata of the api gateway"""
+        for key, value in annotations.items():
+            self.metadata.annotations[key] = str(value)
+        return self
+
     @classmethod
     def from_scheme(cls, api_gateway: schemas.APIGateway):
         project = api_gateway.metadata.labels.get(
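A hedged usage sketch of the new helper, assuming `gateway` is an APIGateway object; the annotation key is only an example:

    gateway.with_annotations(
        {"nginx.ingress.kubernetes.io/proxy-body-size": "100m"}
    )  # values are stringified and stored on metadata.annotations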
mlrun/runtimes/nuclio/application/application.py
CHANGED
@@ -438,9 +438,10 @@ class ApplicationRuntime(RemoteRuntime):
         """
         Create the application API gateway. Once the application is deployed, the API gateway can be created.
         An application without an API gateway is not accessible.
+
         :param name: The name of the API gateway
         :param path: Optional path of the API gateway, default value is "/".
-
+                     The given path should be supported by the deployed application
         :param direct_port_access: Set True to allow direct port access to the application sidecar
         :param authentication_mode: API Gateway authentication mode
         :param authentication_creds: API Gateway basic authentication credentials as a tuple (username, password)
@@ -449,8 +450,7 @@ class ApplicationRuntime(RemoteRuntime):
         :param set_as_default: Set the API gateway as the default for the application (`status.api_gateway`)
         :param gateway_timeout: nginx ingress timeout in sec (request timeout, when will the gateway return an
                                 error)
-
-        :return: The API gateway URL
+        :return: The API gateway URL
         """
         if not name:
             raise mlrun.errors.MLRunInvalidArgumentError(
mlrun/runtimes/nuclio/function.py
CHANGED
@@ -23,6 +23,7 @@ import inflection
 import nuclio
 import nuclio.utils
 import requests
+import semver
 from aiohttp.client import ClientSession
 from kubernetes import client
 from mlrun_pipelines.common.mounts import VolumeMount
@@ -296,10 +297,37 @@ class RemoteRuntime(KubeResource):
         """
         if hasattr(spec, "to_dict"):
             spec = spec.to_dict()
+
+        self._validate_triggers(spec)
+
         spec["name"] = name
         self.spec.config[f"spec.triggers.{name}"] = spec
         return self
 
+    def _validate_triggers(self, spec):
+        # ML-7763 / NUC-233
+        min_nuclio_version = "1.13.12"
+        if mlconf.nuclio_version and semver.VersionInfo.parse(
+            mlconf.nuclio_version
+        ) < semver.VersionInfo.parse(min_nuclio_version):
+            explicit_ack_enabled = False
+            num_triggers = 0
+            trigger_name = spec.get("name", "UNKNOWN")
+            for key, config in [(f"spec.triggers.{trigger_name}", spec)] + list(
+                self.spec.config.items()
+            ):
+                if key.startswith("spec.triggers."):
+                    num_triggers += 1
+                    explicit_ack_enabled = (
+                        config.get("explicitAckMode", "disable") != "disable"
+                    )
+
+            if num_triggers > 1 and explicit_ack_enabled:
+                raise mlrun.errors.MLRunInvalidArgumentError(
+                    "Multiple triggers cannot be used in conjunction with explicit ack. "
+                    f"Please upgrade to nuclio {min_nuclio_version} or newer."
+                )
+
     def with_source_archive(
         self,
         source,
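The guard is a plain semver comparison; a minimal sketch of the check it performs (version strings taken from the diff):

    import semver

    min_nuclio_version = "1.13.12"
    configured = "1.13.11"  # e.g. mlconf.nuclio_version
    too_old = semver.VersionInfo.parse(configured) < semver.VersionInfo.parse(
        min_nuclio_version
    )
    # too_old is True here, so multiple triggers combined with explicit ack
    # would raise MLRunInvalidArgumentError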
@@ -495,6 +523,15 @@
         extra_attributes = extra_attributes or {}
         if ack_window_size:
             extra_attributes["ackWindowSize"] = ack_window_size
+
+        access_key = kwargs.pop("access_key", None)
+        if access_key:
+            logger.warning(
+                "The access_key parameter is deprecated and will be ignored, "
+                "use the V3IO_ACCESS_KEY environment variable instead"
+            )
+        access_key = self._resolve_v3io_access_key()
+
         self.add_trigger(
             name,
             V3IOStreamTrigger(
|
|
|
506
543
|
webapi=endpoint or "http://v3io-webapi:8081",
|
|
507
544
|
extra_attributes=extra_attributes,
|
|
508
545
|
read_batch_size=256,
|
|
546
|
+
access_key=access_key,
|
|
509
547
|
**kwargs,
|
|
510
548
|
),
|
|
511
549
|
)
|
|
@@ -1241,6 +1279,13 @@ class RemoteRuntime(KubeResource):
|
|
|
1241
1279
|
|
|
1242
1280
|
return self._resolve_invocation_url("", force_external_address)
|
|
1243
1281
|
|
|
1282
|
+
@staticmethod
|
|
1283
|
+
def _resolve_v3io_access_key():
|
|
1284
|
+
# Nuclio supports generating access key for v3io stream trigger only from version 1.13.11
|
|
1285
|
+
if validate_nuclio_version_compatibility("1.13.11"):
|
|
1286
|
+
return mlrun.model.Credentials.generate_access_key
|
|
1287
|
+
return None
|
|
1288
|
+
|
|
1244
1289
|
|
|
1245
1290
|
def parse_logs(logs):
|
|
1246
1291
|
logs = json.loads(logs)
|
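A hedged caller-side sketch: the deprecated `access_key` kwarg is dropped and the key is resolved internally; the method name and stream path below are assumptions based on the surrounding code:

    fn.add_v3io_stream_trigger(
        stream_path="projects/my-proj/streams/my-stream",
        name="stream",
    )  # access_key is filled in via _resolve_v3io_access_key()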
mlrun/runtimes/pod.py
CHANGED
@@ -1107,12 +1107,12 @@ class KubeResource(BaseRuntime, KfpAdapterMixin):
 
         :param state_thresholds: A dictionary of state to threshold. The supported states are:
 
-
-
-
-
-            See mlrun.mlconf.function.spec.state_thresholds for the default thresholds.
+            * pending_scheduled - The pod/crd is scheduled on a node but not yet running
+            * pending_not_scheduled - The pod/crd is not yet scheduled on a node
+            * executing - The pod/crd started and is running
+            * image_pull_backoff - The pod/crd is in image pull backoff
 
+            See :code:`mlrun.mlconf.function.spec.state_thresholds` for the default thresholds.
         :param patch: Whether to merge the given thresholds with the existing thresholds (True, default)
                       or override them (False)
         """
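A short usage sketch of the documented states, assuming this docstring belongs to `set_state_thresholds` on a KubeResource-based runtime `fn` (threshold values are hypothetical):

    fn.set_state_thresholds(
        {"image_pull_backoff": "1h", "executing": "2h"},
        patch=True,  # merge with the existing thresholds
    )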
@@ -1347,20 +1347,26 @@ class KubeResource(BaseRuntime, KfpAdapterMixin):
 
     def _build_image(
         self,
-        builder_env,
-        force_build,
-        mlrun_version_specifier,
-        show_on_failure,
-        skip_deployed,
-        watch,
-        is_kfp,
-        with_mlrun,
+        builder_env: dict,
+        force_build: bool,
+        mlrun_version_specifier: typing.Optional[bool],
+        show_on_failure: bool,
+        skip_deployed: bool,
+        watch: bool,
+        is_kfp: bool,
+        with_mlrun: typing.Optional[bool],
     ):
         # When we're in pipelines context we must watch otherwise the pipelines pod will exit before the operation
         # is actually done. (when a pipelines pod exits, the pipeline step marked as done)
         if is_kfp:
             watch = True
 
+        if skip_deployed and self.requires_build() and not self.is_deployed():
+            logger.warning(
+                f"Even though {skip_deployed=}, the build might be triggered due to the function's configuration. "
+                "See requires_build() and is_deployed() for reasoning."
+            )
+
         db = self._get_db()
         data = db.remote_builder(
             self,
mlrun/serving/server.py
CHANGED
mlrun/utils/helpers.py
CHANGED
@@ -41,7 +41,7 @@ import semver
 import yaml
 from dateutil import parser
 from mlrun_pipelines.models import PipelineRun
-from pandas import Timestamp
+from pandas import Timedelta, Timestamp
 from yaml.representer import RepresenterError
 
 import mlrun
@@ -111,9 +111,12 @@ def get_artifact_target(item: dict, project=None):
     project_str = project or item["metadata"].get("project")
     tree = item["metadata"].get("tree")
     tag = item["metadata"].get("tag")
+    kind = item.get("kind")
 
-    if
-        target =
+    if kind in {"dataset", "model", "artifact"} and db_key:
+        target = (
+            f"{DB_SCHEMA}://{StorePrefix.kind_to_prefix(kind)}/{project_str}/{db_key}"
+        )
     target += f":{tag}" if tag else ":latest"
     if tree:
         target += f"@{tree}"
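An illustration of the store URI shape the new branch produces, assuming `DB_SCHEMA` is "store" and `StorePrefix.kind_to_prefix("dataset")` yields "datasets" (all values hypothetical):

    # store://<prefix>/<project>/<db_key>:<tag>@<tree>
    target = "store://datasets/my-proj/my-table:latest@abc123"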
@@ -133,18 +136,25 @@ def is_legacy_artifact(artifact):
 logger = create_logger(config.log_level, config.log_formatter, "mlrun", sys.stdout)
 missing = object()
 
-is_ipython = False
+is_ipython = False  # is IPython terminal, including Jupyter
+is_jupyter = False  # is Jupyter notebook/lab terminal
 try:
-    import IPython
+    import IPython.core.getipython
+
+    ipy = IPython.core.getipython.get_ipython()
 
-
-
-
-
-
+    is_ipython = ipy is not None
+    is_jupyter = (
+        is_ipython
+        # not IPython terminal
+        and "Terminal" not in str(type(ipy))
+    )
+
+    del ipy
+
 except ModuleNotFoundError:
     pass
 
-if is_ipython and config.nest_asyncio_enabled in ["1", "True"]:
+if is_jupyter and config.nest_asyncio_enabled in ["1", "True"]:
     # bypass Jupyter asyncio bug
     import nest_asyncio
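A minimal sketch of the detection the module now runs at import time:

    from IPython.core.getipython import get_ipython

    ipy = get_ipython()
    is_ipython = ipy is not None  # any IPython shell, including the terminal
    # Jupyter kernels use a ZMQ shell; the terminal shell's type name contains "Terminal"
    is_jupyter = is_ipython and "Terminal" not in str(type(ipy))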
@@ -1421,11 +1431,7 @@ def is_running_in_jupyter_notebook() -> bool:
     Check if the code is running inside a Jupyter Notebook.
     :return: True if running inside a Jupyter Notebook, False otherwise.
     """
-    import IPython
-
-    ipy = IPython.get_ipython()
-    # if its IPython terminal, it isn't a Jupyter ipython
-    return ipy and "Terminal" not in str(type(ipy))
+    return is_jupyter
 
 
 def create_ipython_display():
mlrun/utils/version/version.json
CHANGED
{mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc45.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mlrun
-Version: 1.7.0rc43
+Version: 1.7.0rc45
 Summary: Tracking and config of machine learning runs
 Home-page: https://github.com/mlrun/mlrun
 Author: Yaron Haviv
@@ -33,7 +33,7 @@ Requires-Dist: numpy <1.27.0,>=1.16.5
 Requires-Dist: pandas <2.2,>=1.2
 Requires-Dist: pyarrow <15,>=10.0
 Requires-Dist: pyyaml <7,>=5.4.1
-Requires-Dist: requests ~=2.
+Requires-Dist: requests ~=2.32
 Requires-Dist: tabulate ~=0.8.6
 Requires-Dist: v3io ~=0.6.9
 Requires-Dist: pydantic <1.10.15,>=1.10.8
@@ -57,16 +57,16 @@ Requires-Dist: ossfs ==2023.12.0 ; extra == 'alibaba-oss'
 Requires-Dist: oss2 ==2.18.1 ; extra == 'alibaba-oss'
 Provides-Extra: all
 Requires-Dist: adlfs ==2023.9.0 ; extra == 'all'
-Requires-Dist: aiobotocore <2.
+Requires-Dist: aiobotocore <2.16,>=2.5.0 ; extra == 'all'
 Requires-Dist: avro ~=1.11 ; extra == 'all'
 Requires-Dist: azure-core ~=1.24 ; extra == 'all'
 Requires-Dist: azure-identity ~=1.5 ; extra == 'all'
 Requires-Dist: azure-keyvault-secrets ~=4.2 ; extra == 'all'
 Requires-Dist: bokeh >=2.4.2,~=2.4 ; extra == 'all'
-Requires-Dist: boto3 <1.
-Requires-Dist: dask ~=2023.
+Requires-Dist: boto3 <1.36,>=1.28.0 ; extra == 'all'
+Requires-Dist: dask ~=2023.12.1 ; extra == 'all'
 Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'all'
-Requires-Dist: distributed ~=2023.
+Requires-Dist: distributed ~=2023.12.1 ; extra == 'all'
 Requires-Dist: gcsfs <2024.7,>=2023.9.2 ; extra == 'all'
 Requires-Dist: google-cloud-bigquery-storage ~=2.17 ; extra == 'all'
 Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'all'
@@ -111,15 +111,15 @@ Provides-Extra: bokeh
 Requires-Dist: bokeh >=2.4.2,~=2.4 ; extra == 'bokeh'
 Provides-Extra: complete
 Requires-Dist: adlfs ==2023.9.0 ; extra == 'complete'
-Requires-Dist: aiobotocore <2.
+Requires-Dist: aiobotocore <2.16,>=2.5.0 ; extra == 'complete'
 Requires-Dist: avro ~=1.11 ; extra == 'complete'
 Requires-Dist: azure-core ~=1.24 ; extra == 'complete'
 Requires-Dist: azure-identity ~=1.5 ; extra == 'complete'
 Requires-Dist: azure-keyvault-secrets ~=4.2 ; extra == 'complete'
-Requires-Dist: boto3 <1.
-Requires-Dist: dask ~=2023.
+Requires-Dist: boto3 <1.36,>=1.28.0 ; extra == 'complete'
+Requires-Dist: dask ~=2023.12.1 ; extra == 'complete'
 Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'complete'
-Requires-Dist: distributed ~=2023.
+Requires-Dist: distributed ~=2023.12.1 ; extra == 'complete'
 Requires-Dist: gcsfs <2024.7,>=2023.9.2 ; extra == 'complete'
 Requires-Dist: google-cloud-bigquery-storage ~=2.17 ; extra == 'complete'
 Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'complete'
@@ -140,18 +140,18 @@ Requires-Dist: sqlalchemy ~=1.4 ; extra == 'complete'
 Requires-Dist: taos-ws-py ~=0.3.2 ; extra == 'complete'
 Provides-Extra: complete-api
 Requires-Dist: adlfs ==2023.9.0 ; extra == 'complete-api'
-Requires-Dist: aiobotocore <2.
+Requires-Dist: aiobotocore <2.16,>=2.5.0 ; extra == 'complete-api'
 Requires-Dist: alembic ~=1.9 ; extra == 'complete-api'
 Requires-Dist: apscheduler <4,>=3.10.3 ; extra == 'complete-api'
 Requires-Dist: avro ~=1.11 ; extra == 'complete-api'
 Requires-Dist: azure-core ~=1.24 ; extra == 'complete-api'
 Requires-Dist: azure-identity ~=1.5 ; extra == 'complete-api'
 Requires-Dist: azure-keyvault-secrets ~=4.2 ; extra == 'complete-api'
-Requires-Dist: boto3 <1.
+Requires-Dist: boto3 <1.36,>=1.28.0 ; extra == 'complete-api'
 Requires-Dist: dask-kubernetes ~=0.11.0 ; extra == 'complete-api'
-Requires-Dist: dask ~=2023.
+Requires-Dist: dask ~=2023.12.1 ; extra == 'complete-api'
 Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'complete-api'
-Requires-Dist: distributed ~=2023.
+Requires-Dist: distributed ~=2023.12.1 ; extra == 'complete-api'
 Requires-Dist: fastapi ~=0.110.0 ; extra == 'complete-api'
 Requires-Dist: gcsfs <2024.7,>=2023.9.2 ; extra == 'complete-api'
 Requires-Dist: google-cloud-bigquery-storage ~=2.17 ; extra == 'complete-api'
@@ -179,8 +179,8 @@ Requires-Dist: timelength ~=1.1 ; extra == 'complete-api'
 Requires-Dist: uvicorn ~=0.27.1 ; extra == 'complete-api'
 Requires-Dist: memray ~=1.12 ; (sys_platform != "win32") and extra == 'complete-api'
 Provides-Extra: dask
-Requires-Dist: dask ~=2023.
-Requires-Dist: distributed ~=2023.
+Requires-Dist: dask ~=2023.12.1 ; extra == 'dask'
+Requires-Dist: distributed ~=2023.12.1 ; extra == 'dask'
 Provides-Extra: databricks-sdk
 Requires-Dist: databricks-sdk ~=0.13.0 ; extra == 'databricks-sdk'
 Provides-Extra: google-cloud
@@ -201,8 +201,8 @@ Requires-Dist: plotly ~=5.23 ; extra == 'plotly'
 Provides-Extra: redis
 Requires-Dist: redis ~=4.3 ; extra == 'redis'
 Provides-Extra: s3
-Requires-Dist: boto3 <1.
-Requires-Dist: aiobotocore <2.
+Requires-Dist: boto3 <1.36,>=1.28.0 ; extra == 's3'
+Requires-Dist: aiobotocore <2.16,>=2.5.0 ; extra == 's3'
 Requires-Dist: s3fs <2024.7,>=2023.9.2 ; extra == 's3'
 Provides-Extra: snowflake
 Requires-Dist: snowflake-connector-python ~=3.7 ; extra == 'snowflake'