ml-management 0.5.1rc5__tar.gz → 0.5.1rc7__tar.gz

Files changed (105)
  1. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/s3/s3collector.py +2 -2
  2. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/dataset_loader_pattern.py +1 -3
  3. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/base_executor.py +1 -2
  4. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/load_api.py +9 -6
  5. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/log_api.py +81 -252
  6. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/server_mlmanager_exceptions.py +1 -1
  7. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/variables.py +11 -4
  8. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/model_pattern.py +3 -4
  9. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/s3/manager.py +1 -0
  10. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/sdk/dataset_loader.py +0 -2
  11. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/sdk/executor.py +0 -2
  12. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/sdk/model.py +0 -2
  13. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/sdk/schema.py +47 -79
  14. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/PKG-INFO +1 -1
  15. ml-management-0.5.1rc7/VERSION +1 -0
  16. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ml_management.egg-info/PKG-INFO +1 -1
  17. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ml_management.egg-info/SOURCES.txt +0 -2
  18. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ml_management.egg-info/requires.txt +6 -2
  19. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/setup.py +6 -2
  20. ml-management-0.5.1rc5/ML_management/mlmanagement/mlmanager.py +0 -212
  21. ml-management-0.5.1rc5/ML_management/model/patterns/rich_python_model.py +0 -10
  22. ml-management-0.5.1rc5/VERSION +0 -1
  23. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/MANIFEST.in +0 -0
  24. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/__init__.py +0 -0
  25. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/__init__.py +0 -0
  26. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/collector_pattern.py +0 -0
  27. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/collector_pattern_to_methods_map.py +0 -0
  28. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/collectors.py +0 -0
  29. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/dummy/__init__.py +0 -0
  30. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/dummy/dummy_collector.py +0 -0
  31. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/s3/__init__.py +0 -0
  32. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/topic_markers/__init__.py +0 -0
  33. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/topic_markers/api_schema.py +0 -0
  34. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/collectors/topic_markers/topic_markers_collector.py +0 -0
  35. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/__init__.py +0 -0
  36. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/base_splits_dataset_loader.py +0 -0
  37. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/dataset_loader_pattern_to_methods_map.py +0 -0
  38. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/poisoned_images_dataset_loader.py +0 -0
  39. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/templates/__init__.py +0 -0
  40. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/templates/dummy_dataset_loader/__init__.py +0 -0
  41. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/templates/dummy_dataset_loader/conda.yaml +0 -0
  42. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/templates/dummy_dataset_loader/dummy_dataset.py +0 -0
  43. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/dataset_loader/templates/upload.py +0 -0
  44. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/__init__.py +0 -0
  45. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/executor_pattern.py +0 -0
  46. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/executor_pattern_to_methods_map.py +0 -0
  47. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/no_model_executor_pattern.py +0 -0
  48. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/patterns.py +0 -0
  49. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/__init__.py +0 -0
  50. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/eval/__init__.py +0 -0
  51. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/eval/conda.yaml +0 -0
  52. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/eval/eval_executor.py +0 -0
  53. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/finetune/__init__.py +0 -0
  54. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/finetune/conda.yaml +0 -0
  55. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/finetune/finetune_executor.py +0 -0
  56. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/train/__init__.py +0 -0
  57. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/train/conda.yaml +0 -0
  58. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/train/train_executor.py +0 -0
  59. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/templates/upload.py +0 -0
  60. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/executor/upload_model_mode.py +0 -0
  61. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/__init__.py +0 -0
  62. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/backend_api.py +0 -0
  63. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/base_exceptions.py +0 -0
  64. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/jsonschema_exceptions.py +0 -0
  65. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/jsonschema_inference.py +0 -0
  66. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/model_type.py +0 -0
  67. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/module_finder.py +0 -0
  68. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/session.py +0 -0
  69. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/singleton_pattern.py +0 -0
  70. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/utils.py +0 -0
  71. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/mlmanagement/visibility_options.py +0 -0
  72. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/__init__.py +0 -0
  73. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/model_type_to_methods_map.py +0 -0
  74. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/__init__.py +0 -0
  75. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/evaluatable_model.py +0 -0
  76. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/gradient_model.py +0 -0
  77. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/model_with_losses.py +0 -0
  78. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/model_with_metrics.py +0 -0
  79. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/preprocessor.py +0 -0
  80. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/retrainable_model.py +0 -0
  81. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/target_layer.py +0 -0
  82. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/torch_model.py +0 -0
  83. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/trainable_model.py +0 -0
  84. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/model/patterns/transformer.py +0 -0
  85. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/registry/__init__.py +0 -0
  86. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/registry/exceptions.py +0 -0
  87. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/s3/__init__.py +0 -0
  88. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/s3/utils.py +0 -0
  89. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/sdk/__init__.py +0 -0
  90. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/sdk/experiment.py +0 -0
  91. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/sdk/job.py +0 -0
  92. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/sdk/parameters.py +0 -0
  93. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/sdk/sdk.py +0 -0
  94. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/test_sdk/__init__.py +0 -0
  95. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/test_sdk/test_sdk.py +0 -0
  96. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/tests/__init__.py +0 -0
  97. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/tests/test_jsonschema_inference.py +0 -0
  98. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/uploader_data/__init__.py +0 -0
  99. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/uploader_data/s3_uploader.py +0 -0
  100. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/uploader_data/utils.py +0 -0
  101. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ML_management/version.py +0 -0
  102. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/README.md +0 -0
  103. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ml_management.egg-info/dependency_links.txt +0 -0
  104. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/ml_management.egg-info/top_level.txt +0 -0
  105. {ml-management-0.5.1rc5 → ml-management-0.5.1rc7}/setup.cfg +0 -0
ML_management/collectors/s3/s3collector.py

@@ -16,8 +16,8 @@ class S3Collector(CollectorPattern):
         "type": "object",
         "properties": {
             "bucket": {"type": "string"},
-            "untar_data": {"type": "boolean"},
-            "remote_paths": {"type": "array", "items": {"type": "string"}},
+            "untar_data": {"type": "boolean", "default": False},
+            "remote_paths": {"type": ["array", None], "items": {"type": "string"}, "default": None},
         },
         "required": ["bucket"],
         "additionalProperties": False,
ML_management/dataset_loader/dataset_loader_pattern.py

@@ -1,10 +1,8 @@
 """Dataset loader template for custom dataset loader."""
 from abc import ABC, abstractmethod
 
-from ML_management.model.patterns.rich_python_model import RichPythonModel
 
-
-class DatasetLoaderPattern(RichPythonModel, ABC):
+class DatasetLoaderPattern(ABC):
     """Define dataset loader.
 
     Attributes
ML_management/executor/base_executor.py

@@ -14,12 +14,11 @@ from ML_management.executor.patterns import (
 )
 from ML_management.model.model_type_to_methods_map import ModelMethodName
 from ML_management.model.patterns.model_pattern import Model
-from ML_management.model.patterns.rich_python_model import RichPythonModel
 
 default_dataset_loader_pattern = OneDatasetLoaderPattern()
 
 
-class BaseExecutor(RichPythonModel, ABC):
+class BaseExecutor(ABC):
     """Define custom job executor."""
 
     DEFAULT_ROLE = DEFAULT_ROLE
ML_management/mlmanagement/load_api.py

@@ -16,12 +16,14 @@ from ML_management.mlmanagement.base_exceptions import MLMClientError
 from ML_management.mlmanagement.log_api import _raise_error
 from ML_management.mlmanagement.model_type import ModelType
 from ML_management.mlmanagement.session import AuthSession
-from ML_management.mlmanagement.variables import get_log_service_url
-from mlflow.models.model import MLMODEL_FILE_NAME
-from mlflow.pyfunc import DATA, FLAVOR_NAME
-from mlflow.pyfunc.model import CONFIG_KEY_ARTIFACTS
-
-MLCONFIG = "MLConfig.yaml"
+from ML_management.mlmanagement.variables import (
+    CONFIG_KEY_ARTIFACTS,
+    DATA,
+    FLAVOR_NAME,
+    MLCONFIG,
+    MLMODEL_FILE_NAME,
+    get_log_service_url,
+)
 
 
 def download_artifacts_by_name_version(
@@ -106,6 +108,7 @@ def _set_model_version_requirements(local_path) -> None:
         subprocess.check_call(
             [sys.executable, "-m", "pip", "install", "--no-cache-dir", "--default-timeout=100", *requirements]
         )
+
     except Exception:
         print(traceback.format_exc())
 
ML_management/mlmanagement/log_api.py

@@ -2,31 +2,21 @@ import importlib
 import json
 import os
 import posixpath
-import subprocess
 import sys
 import tarfile
 import threading
 import warnings
 from contextlib import _GeneratorContextManager
 from pathlib import Path
-from tempfile import TemporaryDirectory
 from typing import Dict, List, Optional, Union
 
-import cloudpickle
 import httpx
-import numpy
-import pandas
 import yaml
-from scipy.sparse import csc_matrix, csr_matrix
 
-import mlflow
 from ML_management.mlmanagement import variables
-from ML_management.mlmanagement.base_exceptions import *  # noqa: F403
-from ML_management.mlmanagement.base_exceptions import MLMClientError, MLMServerError, PylintError
+from ML_management.mlmanagement.base_exceptions import MLMClientError, MLMServerError
 from ML_management.mlmanagement.jsonschema_inference import infer_jsonschema
-from ML_management.mlmanagement.mlmanager import set_experiment, start_run, start_run_if_not_exist
 from ML_management.mlmanagement.model_type import ModelType
-from ML_management.mlmanagement.module_finder import ModuleFinder
 from ML_management.mlmanagement.server_mlmanager_exceptions import *  # noqa: F403
 from ML_management.mlmanagement.server_mlmanager_exceptions import (
     AuthError,
@@ -36,155 +26,23 @@ from ML_management.mlmanagement.server_mlmanager_exceptions import (
 from ML_management.mlmanagement.session import AuthSession
 from ML_management.mlmanagement.utils import INIT_FUNCTION_NAME, is_model_name_valid, validate_predict_config
 from ML_management.mlmanagement.variables import (
+    CONFIG_KEY_ARTIFACTS,
+    DATA,
+    DEFAULT_EXPERIMENT,
     EXPERIMENT_NAME_FOR_DATASET_LOADER,
     EXPERIMENT_NAME_FOR_EXECUTOR,
     FILENAME_FOR_INFERENCE_CONFIG,
-    active_run_stack,
     get_log_service_url,
     get_server_ml_api,
 )
 from ML_management.mlmanagement.visibility_options import VisibilityOptions
-from ML_management.registry.exceptions import *  # noqa: F403
-from mlflow.pyfunc import DATA
-
-CONFIG_KEY_ARTIFACTS = "artifacts"
-
-
-def _log_model(
-    artifact_path,
-    description: Optional[str],
-    model_version_tags: Optional[Dict[str, str]] = None,
-    code_path=None,
-    conda_env=None,
-    python_model=None,
-    artifacts: Optional[dict] = None,
-    registered_model_name: str = "default_name",
-    signature: mlflow.models.signature.ModelSignature = None,
-    input_example: Union[
-        pandas.core.frame.DataFrame, numpy.ndarray, dict, list, csr_matrix, csc_matrix, str, bytes
-    ] = None,
-    await_registration_for: int = 300,
-    pip_requirements=None,
-    extra_pip_requirements=None,
-    metadata=None,
-    source_model_name=None,
-    source_model_version=None,
-    source_executor_name=None,
-    source_executor_version=None,
-    source_executor_role=None,
-    upload_model_mode=None,
-    visibility=None,
-    extra_modules_names: Optional[list] = None,
-    used_modules_names: Optional[list] = None,
-    root_module_name: str = "__main__",
-    linter_check: bool = True,
-    start_build: bool = True,
-    create_venv_pack: bool = False,
-):
-    """
-    Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow artifact.
-
-    Current run is using.
-    Parameter registered_model_name must be not empty string,
-    consist of alphanumeric characters, '_'
-    and must start and end with an alphanumeric character.
-    Validation regexp: "(([A-Za-z0-9][A-Za-z0-9_]*)?[A-Za-z0-9])+"
-    You cannot specify the parameters: loader_module, data_path and the parameters: python_model, artifacts together.
-    """
-    raise DeprecationWarning("The function '_log_model' is deprecated.")
-    from ML_management.executor.upload_model_mode import UploadModelMode  # circular import
-
-    if extra_modules_names and used_modules_names:
-        raise RuntimeError("Parameters 'extra_modules_names' and 'used_modules_names' cannot be used at the same time.")
-    if upload_model_mode == UploadModelMode.none:
-        raise RuntimeError("You can't log a model using the 'upload_model_mode' parameter set to none.")
-    if not is_model_name_valid(registered_model_name):
-        raise RuntimeError(
-            "Parameter 'registered_model_name' must be not empty string, "
-            "consist of alphanumeric characters, '_' "
-            "and must start and end with an alphanumeric character."
-            "Validation regexp: '(([A-Za-z0-9][A-Za-z0-9_]*)?[A-Za-z0-9])+'"
-        )
-    del UploadModelMode  # need to delete this because it is not JSON serializable
-    if create_venv_pack:
-        if not isinstance(artifacts, dict):
-            raise RuntimeError("You must provide config for model serving in artifacts, if create_venv_pack=True")
-        validate_predict_config(path=artifacts.get(FILENAME_FOR_INFERENCE_CONFIG))
-    start_run_if_not_exist()
-    if used_modules_names is not None:
-        submodules = set(used_modules_names)
-        ModuleFinder.import_modules_by_name(submodules)
-    else:
-        submodules = ModuleFinder().find_root_submodules(root_name=root_module_name)
-    if linter_check:
-        for module_name in submodules:
-            if hasattr(sys.modules[module_name], "__file__"):
-                # pylint check what would fall on any error except when abstract method is not implemented (E0110)
-                # and when pylint has been unable to import a module (E0401)
-                linter_output = subprocess.run(
-                    [
-                        sys.executable,
-                        "-m",
-                        "pylint",
-                        "{}".format(sys.modules[module_name].__file__),
-                        "--clear-cache-post-run=True",
-                        "--disable=all",
-                        "--enable=E",
-                        "--disable=E0401, E1101",
-                        "--fail-on=E",
-                        "--reports=n",
-                        "--score=n",
-                        "-j 0",
-                    ],
-                    check=False,
-                    stdout=subprocess.PIPE,
-                    text=True,
-                )
-                if linter_output.stdout:  # if linter has output when something wrong with code
-                    raise PylintError(linter_output.stdout)
-                del linter_output
-    if extra_modules_names:
-        extra_set = set(extra_modules_names)
-        ModuleFinder.import_modules_by_name(extra_set)
-        submodules = submodules.union(extra_set)
-    try:
-        for module_name in submodules:
-            cloudpickle.register_pickle_by_value(sys.modules[module_name])
-        kwargs = {
-            "artifact_path": artifact_path,
-            "description": description,
-            "model_version_tags": model_version_tags,
-            "code_path": code_path,
-            "conda_env": conda_env,
-            "python_model": python_model,
-            "registered_model_name": registered_model_name,
-            "signature": signature,
-            "input_example": input_example,
-            "await_registration_for": await_registration_for,
-            "pip_requirements": pip_requirements,
-            "extra_pip_requirements": extra_pip_requirements,
-            "metadata": metadata,
-            "source_model_name": source_model_name,
-            "source_model_version": source_model_version,
-            "source_executor_name": source_executor_name,
-            "source_executor_version": source_executor_version,
-            "source_executor_role": source_executor_role,
-            "upload_model_mode": upload_model_mode,
-            "visibility": visibility,
-            "start_build": start_build,
-            "create_venv_pack": create_venv_pack,
-        }
-        response = _request_log_model("log_model", kwargs)
-        return _raise_error(response)
-    finally:
-        for module_name in submodules:
-            cloudpickle.unregister_pickle_by_value(sys.modules[module_name])
 
 
 def _log_object_src(
     artifact_path,
     model_path: str,
     description: str,
+    experiment_name: str,
     model_type: ModelType = ModelType.MODEL,
     model_version_tags: Optional[Dict[str, str]] = None,
     registered_model_name: str = "default_name",
@@ -200,7 +58,7 @@ def _log_object_src(
     additional_local_packages: Optional[Union[List[str], str]] = None,
 ):
     """
-    Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow artifact.
+    Log a src model with custom inference logic and optional data dependencies as an artifact.
 
     Current run is using.
     Parameter registered_model_name must be not empty string,
@@ -235,7 +93,6 @@ def _log_object_src(
     del UploadModelMode  # need to delete this because it is not JSON serializable
     if create_venv_pack:
         validate_predict_config(path=os.path.join(f"{model_path}", "artifacts", f"{FILENAME_FOR_INFERENCE_CONFIG}"))
-    start_run_if_not_exist()
    old_python_path = sys.path.copy()
    old_sys_modules = sys.modules.copy()
    try:
@@ -262,8 +119,9 @@ def _log_object_src(
             "create_venv_pack": create_venv_pack,
             "additional_local_packages": additional_local_packages,
             "get_object_func": get_object_func,
+            "experiment_name": experiment_name,
         }
-        response = _request_log_model("log_object_src", kwargs)
+        response = _request_log_model(kwargs)
         result = _raise_error(response)
     except Exception as err:
         raise err
@@ -275,13 +133,15 @@ def _log_object_src(
             sys.modules.pop(module)
 
     if additional_local_packages:
-        if isinstance(additional_local_packages, list):
-            for package in additional_local_packages:
-                response = _request_log_artifacts(package, DATA, model_type)
-                _raise_error(response)
-        else:
-            response = _request_log_artifacts(additional_local_packages, DATA, model_type)
+        name_version = json.loads(response.content.decode("utf-8"))
+        if not isinstance(additional_local_packages, list):
+            additional_local_packages = [additional_local_packages]
+        for package in additional_local_packages:
+            response = _request_log_artifacts(
+                package, DATA, name_version["name"], int(name_version["version"]), model_type
+            )
         _raise_error(response)
+
     return result
 
 
@@ -325,27 +185,17 @@ def log_executor_src(
     None
 
     """
-    old_experiment_name = variables.active_experiment
-
-    try:
-        set_experiment(EXPERIMENT_NAME_FOR_EXECUTOR, visibility=VisibilityOptions.PUBLIC)
-
-        with start_run(nested=True):
-            _log_object_src(
-                artifact_path="",
-                description=description,
-                registered_model_name=registered_name,
-                start_build=start_build,
-                model_path=model_path,
-                visibility=visibility,
-                additional_local_packages=additional_local_packages,
-                model_type=ModelType.EXECUTOR,
-            )
-
-    except Exception as err:
-        raise err
-    finally:
-        variables.active_experiment = old_experiment_name
+    _log_object_src(
+        artifact_path="",
+        description=description,
+        registered_model_name=registered_name,
+        start_build=start_build,
+        model_path=model_path,
+        visibility=visibility,
+        additional_local_packages=additional_local_packages,
+        model_type=ModelType.EXECUTOR,
+        experiment_name=EXPERIMENT_NAME_FOR_EXECUTOR,
+    )
 
 
 def log_dataset_loader_src(
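Note: the client-side experiment bookkeeping (`set_experiment`, nested `start_run`, restoring `active_experiment`) is gone; uploading an executor is now a single call that passes the experiment name through. A minimal sketch, assuming the remaining parameters keep their defaults:

```python
from ML_management.mlmanagement.log_api import log_executor_src

# Sketch only: names and paths are illustrative.
log_executor_src(
    registered_name="my_executor",
    description="demo executor",
    model_path="./my_executor_src",  # directory containing the executor source
)
```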
@@ -385,23 +235,16 @@ def log_dataset_loader_src(
     None
 
     """
-    old_experiment_name = variables.active_experiment
-    try:
-        set_experiment(EXPERIMENT_NAME_FOR_DATASET_LOADER, visibility=VisibilityOptions.PUBLIC)
-        with start_run(nested=True):
-            _log_object_src(
-                artifact_path="",
-                description=description,
-                registered_model_name=registered_name,
-                model_path=model_path,
-                visibility=visibility,
-                additional_local_packages=additional_local_packages,
-                model_type=ModelType.DATASET_LOADER,
-            )
-    except Exception as err:
-        raise err
-    finally:
-        variables.active_experiment = old_experiment_name
+    _log_object_src(
+        artifact_path="",
+        description=description,
+        registered_model_name=registered_name,
+        model_path=model_path,
+        visibility=visibility,
+        additional_local_packages=additional_local_packages,
+        model_type=ModelType.DATASET_LOADER,
+        experiment_name=EXPERIMENT_NAME_FOR_DATASET_LOADER,
+    )
 
 
 def log_model_src(
@@ -410,7 +253,7 @@ def log_model_src(
     description: str,
     start_build: bool = True,
     model_version_tags: Optional[Dict[str, str]] = None,
-    experiment_name: Optional[str] = None,
+    experiment_name: str = DEFAULT_EXPERIMENT,
     create_venv_pack: bool = False,
     visibility: VisibilityOptions = VisibilityOptions.PRIVATE,
     additional_local_packages: Optional[Union[List[str], str]] = None,
@@ -445,27 +288,19 @@ def log_model_src(
     =======
     None
     """
-    old_experiment_name = variables.active_experiment
-    try:
-        if experiment_name:
-            set_experiment(experiment_name, visibility=VisibilityOptions.PUBLIC)
-        with start_run(nested=True):
-            _log_object_src(
-                artifact_path="",
-                description=description,
-                registered_model_name=registered_name,
-                model_path=model_path,
-                start_build=start_build,
-                model_version_tags=model_version_tags,
-                create_venv_pack=create_venv_pack,
-                visibility=visibility,
-                additional_local_packages=additional_local_packages,
-                model_type=ModelType.MODEL,
-            )
-    except Exception as err:
-        raise err
-    finally:
-        variables.active_experiment = old_experiment_name
+    _log_object_src(
+        artifact_path="",
+        description=description,
+        registered_model_name=registered_name,
+        model_path=model_path,
+        start_build=start_build,
+        model_version_tags=model_version_tags,
+        create_venv_pack=create_venv_pack,
+        visibility=visibility,
+        additional_local_packages=additional_local_packages,
+        model_type=ModelType.MODEL,
+        experiment_name=experiment_name,
+    )
 
 
 def log_artifact(local_path: str, artifact_path: Optional[str] = None) -> None:
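Note: `log_model_src` gets the same flattening, with `experiment_name` demoted from experiment-switching machinery to a plain keyword argument defaulting to `"Default"`. A sketch of a call under rc7 (parameters not shown in this hunk, such as `registered_name`, are assumed to mirror `log_executor_src`):

```python
from ML_management.mlmanagement.log_api import log_model_src
from ML_management.mlmanagement.visibility_options import VisibilityOptions

log_model_src(
    registered_name="my_model",       # assumed parameter name, mirroring log_executor_src
    model_path="./my_model_src",
    description="demo model",
    experiment_name="my_experiment",  # new in rc7; previously set via set_experiment()
    visibility=VisibilityOptions.PRIVATE,
)
```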
@@ -610,14 +445,14 @@ def log_metrics(metrics: Dict[str, float], step: int = 0) -> None:
     return _request_log_metric(metrics_to_log, step)
 
 
-def _request_log_model(function_name: str, kwargs: dict):
+def _request_log_model(kwargs: dict):
     """
     Send request for log_model function.
 
     Steps for log model:
     0) Infer jsonschema, raise if it is invalid
     1) open temporary directory
-    2) Do mlflow.save_model() locally
+    2) Do init model locally
     3) Pack it to tar file
     4) Send it to server to log model there.
     """
@@ -677,7 +512,7 @@ def _request_log_model(function_name: str, kwargs: dict):
     from ML_management.model.model_type_to_methods_map import model_pattern_to_methods
     from ML_management.model.patterns.model_pattern import Model
 
-    experiment_name = variables.active_experiment.get("experiment_name")
+    experiment_name = kwargs.get("experiment_name", DEFAULT_EXPERIMENT)
 
     if python_model is not None:
         if isinstance(python_model, Model):
@@ -721,42 +556,38 @@ def _request_log_model(function_name: str, kwargs: dict):
         kwargs.pop(delete_arg, None)
 
     log_request = {
-        "function_name": function_name,
-        "active_experiment": variables.active_experiment,
-        "active_run_ids": [run.info.run_id for run in active_run_stack],
+        "job_run_id": variables.job_run_id,
     }
 
-    if function_name == "log_model":
-        kwargs["loader_module"] = mlflow.pyfunc.model.__name__
-        with TemporaryDirectory() as temp_dir:
-            model_folder = "model"
-            path_for_model_folder = os.path.join(temp_dir, model_folder)
-            mlflow.pyfunc.save_model(path=path_for_model_folder, **kwargs_for_save_model)
-            model_folder = path_for_model_folder
-            log_request["kwargs"] = kwargs
-            return _open_pipe_send_request(model_folder, log_request, url=get_log_service_url("log_model"))
-    else:
-        artifacts_path = os.path.join(kwargs["model_path"], CONFIG_KEY_ARTIFACTS)
-        if os.path.isfile(artifacts_path):
-            raise Exception(f"The artifact file {artifacts_path} is invalid. The artifact must be a directory.")
+    artifacts_path = os.path.join(kwargs["model_path"], CONFIG_KEY_ARTIFACTS)
+    if os.path.isfile(artifacts_path):
+        raise Exception(f"The artifact file {artifacts_path} is invalid. The artifact must be a directory.")
 
-        model_folder = kwargs["model_path"]
+    model_folder = kwargs["model_path"]
 
-        del kwargs["model_path"]
-        kwargs["loader_module"] = "ML_management.loader.loader"
-        log_request["kwargs"] = kwargs
-        return _open_pipe_send_request(model_folder, log_request, url=get_log_service_url("log_model"))
+    del kwargs["model_path"]
+    kwargs["loader_module"] = "ML_management.loader.loader"
+    log_request["kwargs"] = kwargs
+    return _open_pipe_send_request(model_folder, log_request, url=get_log_service_url("log_model"))
 
     else:
         raise Exception("python_model parameter must be specified")
 
 
-def _request_log_artifacts(local_path, artifact_path, model_type: Optional[ModelType] = None):
+def _request_log_artifacts(
+    local_path,
+    artifact_path,
+    name: Optional[str] = None,
+    version: Optional[int] = None,
+    model_type: Optional[ModelType] = None,
+):
     """Send request for log artifact."""
     log_artifact_request = {
-        "kwargs": {"local_path": local_path, "artifact_path": artifact_path},
-        "active_run_ids": [run.info.run_id for run in active_run_stack],
+        "artifact_path": artifact_path,
+        "job_run_id": variables.job_run_id,
         "model_type": model_type,
+        "name": name,
+        "version": version,
     }
     if not os.path.exists(local_path):
         raise FileNotFoundError(f"Path: {local_path} does not exist.")
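Note: the artifact-log payload now identifies the owning model version directly and carries the job-scoped run id instead of an mlflow run stack. Field names below are verbatim from the hunk above; values are illustrative:

```python
import json

log_artifact_request = {
    "artifact_path": "data",  # the DATA constant from variables.py
    "job_run_id": None,       # variables.job_run_id when called inside a job
    "model_type": None,
    "name": "my_model",       # model version the artifact attaches to
    "version": 3,
}
data = {"log_request": json.dumps(log_artifact_request)}  # multipart field renamed from "mlflow_request"
```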
@@ -766,7 +597,7 @@ def _request_log_artifacts(local_path, artifact_path, model_type: Optional[ModelType] = None):
     url = get_log_service_url("log_artifact")
 
     # upload multipart
-    data = {"mlflow_request": json.dumps(log_artifact_request)}
+    data = {"log_request": json.dumps(log_artifact_request)}
     headers = {"Transfer-Encoding": "chunked"}
 
     file_content_type = "application/octet-stream"
@@ -836,12 +667,12 @@ def _request(
     is_tar=False,
     stream=False,
 ) -> _GeneratorContextManager:
-    """Create mlflow_request and send it to server."""
+    """Create log request and send it to server."""
     if not file:
         return AuthSession().post(url=url, stream=stream, json=request_dict)
 
     # upload multipart
-    data = {"mlflow_request": json.dumps(request_dict)}
+    data = {"log_request": json.dumps(request_dict)}
     headers = {"Transfer-Encoding": "chunked"}
 
     file_content_type = "application/octet-stream"
@@ -869,11 +700,10 @@ def _tar_folder(w, model_folder):
 
 
 def _request_log_metric(metrics: Dict[str, float], step: Optional[int] = None):
-    if not active_run_stack:
+    if not variables.job_run_id:
         raise MLMClientError("The log_metric function must be called from the active job.")
-    run_id = active_run_stack[0].info.run_id
 
-    request_dict = {"metrics": metrics, "step": step, "run_id": run_id}
+    request_dict = {"metrics": metrics, "step": step, "run_id": variables.job_run_id}
 
     url = posixpath.join(get_server_ml_api(), "log-metric")
 
@@ -881,11 +711,10 @@ def _request_log_metric(metrics: Dict[str, float], step: Optional[int] = None):
 
 
 def _request_log_params(params: Dict[str, str]):
-    if not active_run_stack:
+    if not variables.job_run_id:
         raise MLMClientError("The log_params function must be called from the active job.")
-    run_id = active_run_stack[0].info.run_id
 
-    request_dict = {"params": params, "run_id": run_id}
+    request_dict = {"params": params, "run_id": variables.job_run_id}
 
     url = posixpath.join(get_server_ml_api(), "log-params")
 
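Note: both helpers now gate on the single `variables.job_run_id` rather than inspecting a run stack. The shared guard, extracted here as a hypothetical helper for clarity:

```python
from ML_management.mlmanagement import variables
from ML_management.mlmanagement.base_exceptions import MLMClientError

def _ensure_active_job(function_name: str) -> str:
    """Hypothetical helper: in rc7, logging is only valid inside an active job."""
    if not variables.job_run_id:
        raise MLMClientError(f"The {function_name} function must be called from the active job.")
    return variables.job_run_id
```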
ML_management/mlmanagement/server_mlmanager_exceptions.py

@@ -1,4 +1,4 @@
-"""Custom exception definition for server mlflow manager graph."""
+"""Custom exception definition for server manager graph."""
 
 from typing import Any, Optional, Tuple
 
ML_management/mlmanagement/variables.py

@@ -10,8 +10,15 @@ s3_password = "PLACEHOLDER"
 mlm_login = None
 mlm_password = None
 
-active_run_stack = []
-active_experiment: dict = {}
+job_run_id = None
+
+CONFIG_KEY_ARTIFACTS = "artifacts"
+DEFAULT_EXPERIMENT = "Default"
+MLCONFIG = "MLConfig.yaml"
+MLMODEL_FILE_NAME = "MLmodel"
+DATA = "data"
+FLAVOR_NAME = "python_function"
+
 
 EXPERIMENT_NAME_FOR_EXECUTOR = "executors"
 EXPERIMENT_NAME_FOR_DATASET_LOADER = "dataset_loaders"
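Note: these module-level constants replace values previously imported from mlflow (`MLMODEL_FILE_NAME`, `DATA`, `FLAVOR_NAME`, `CONFIG_KEY_ARTIFACTS`), which is what lets load_api.py and log_api.py drop their mlflow imports. Outside a job, the run id stays unset:

```python
from ML_management.mlmanagement import variables

assert variables.job_run_id is None               # populated only inside a running job
assert variables.DEFAULT_EXPERIMENT == "Default"
assert variables.CONFIG_KEY_ARTIFACTS == "artifacts"
assert variables.MLMODEL_FILE_NAME == "MLmodel"   # formerly taken from mlflow.models.model
```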
@@ -20,8 +27,8 @@ SERVER_META = posixpath.join("bff", "meta.json")
 
 
 def get_server_ml_api() -> str:
-    """Get server '/mlflow' endpoint URL."""
-    return posixpath.join(get_server_url(), "mlflow")
+    """Get server '/api' endpoint URL."""
+    return posixpath.join(get_server_url(), "api")
 
 
 def get_log_service_url(function_name: str) -> str:
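Note: the ML API root moves from `<server>/mlflow` to `<server>/api`; the metric and param helpers in log_api.py build their endpoints on top of it. Illustrative only (the server URL is hypothetical):

```python
import posixpath

server_url = "https://mlm.example.com"             # hypothetical deployment URL
ml_api = posixpath.join(server_url, "api")         # what get_server_ml_api() now returns
metric_url = posixpath.join(ml_api, "log-metric")  # as built in _request_log_metric
```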
ML_management/model/patterns/model_pattern.py

@@ -4,11 +4,10 @@ import os
 from abc import ABC, abstractmethod
 from pathlib import Path
 
-from ML_management.mlmanagement.log_api import CONFIG_KEY_ARTIFACTS
-from ML_management.model.patterns.rich_python_model import RichPythonModel
+from ML_management.mlmanagement.variables import CONFIG_KEY_ARTIFACTS
 
 
-class Model(RichPythonModel, ABC):
+class Model(ABC):
     """Abstract class for model that Job will use."""
 
     def __new__(cls, *args, **kwargs):  # noqa: ARG003
@@ -40,7 +39,7 @@ class Model(RichPythonModel, ABC):
         """Every model should make predictions."""
         raise NotImplementedError
 
-    def to_device(self, device: str) -> None:
+    def to_device(self, device: str) -> None:  # noqa: B027
         """
         Define model migration to specific device.
 
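Note: the `# noqa: B027` silences flake8-bugbear's warning about an intentionally empty, non-abstract method on an ABC: `to_device` remains a no-op default that concrete models may override. A sketch of such an override, assuming a torch-backed model with a `net` attribute:

```python
from ML_management.model.patterns.model_pattern import Model

class MyTorchModel(Model):
    # Sketch: the required abstract methods are assumed implemented elsewhere.
    def to_device(self, device: str) -> None:
        self.net.to(device)  # hypothetical torch module attribute
```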
ML_management/s3/manager.py

@@ -346,6 +346,7 @@ class S3Manager:
         bucket: str,
         remote_paths: Optional[List[str]] = None,
     ) -> str:
+        remote_paths = remote_paths if remote_paths else None
         if remote_paths is not None and len(remote_paths) != 1:
             raise RuntimeError(f"Expected one tar object, but {len(remote_paths)} were given.")
 
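Note: the added line collapses an empty `remote_paths` list to `None` before the single-tar check, so `remote_paths=[]` no longer trips the "Expected one tar object" error. The normalization in isolation:

```python
def normalize(remote_paths):
    # Mirrors the added line: falsy values ([] or None) collapse to None.
    return remote_paths if remote_paths else None

assert normalize([]) is None
assert normalize(None) is None
assert normalize(["model.tar"]) == ["model.tar"]
```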
ML_management/sdk/dataset_loader.py

@@ -302,7 +302,6 @@ def get_dataset_loader_version(name: str, version: Optional[int] = None) -> DatasetLoaderVersionInfo:
     base_query = op.dataset_loader_version_from_name_version(dataset_loader_version=dataset_loader_version_choice)
     base_query.name()
     base_query.version()
-    base_query.run.run_id()
     base_query.tags()
     base_query.description()
     base_query.creation_timestamp()
@@ -395,6 +394,5 @@ def get_initial_dataset_loader_version(name) -> DatasetLoaderVersionInfo:
     version.version()
     version.tags()
     version.description()
-    version.run.run_id()
     dataset_loader_version = send_graphql_request(op, json_response=False)
     return dataset_loader_version.dataset_loader_version_from_name_version
ML_management/sdk/executor.py

@@ -587,7 +587,6 @@ def get_executor_version(name: str, version: Optional[int] = None) -> ExecutorVersionInfo:
     base_query = op.executor_version_from_name_version(executor_version=executor_version_choice)
     base_query.name()
     base_query.version()
-    base_query.run.run_id()
     base_query.tags()
     base_query.description()
     base_query.creation_timestamp()
@@ -680,6 +679,5 @@ def get_initial_executor_version(name) -> ExecutorVersionInfo:
     version.version()
     version.tags()
     version.description()
-    version.run.run_id()
     executor_version = send_graphql_request(op, json_response=False)
     return executor_version.executor_version_from_name_version