mlrun 1.6.0rc35__py3-none-any.whl → 1.7.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- mlrun/__main__.py +3 -3
- mlrun/api/schemas/__init__.py +1 -1
- mlrun/artifacts/base.py +11 -6
- mlrun/artifacts/dataset.py +2 -2
- mlrun/artifacts/model.py +30 -24
- mlrun/artifacts/plots.py +2 -2
- mlrun/common/db/sql_session.py +5 -3
- mlrun/common/helpers.py +1 -2
- mlrun/common/schemas/artifact.py +3 -3
- mlrun/common/schemas/auth.py +3 -3
- mlrun/common/schemas/background_task.py +1 -1
- mlrun/common/schemas/client_spec.py +1 -1
- mlrun/common/schemas/feature_store.py +16 -16
- mlrun/common/schemas/frontend_spec.py +7 -7
- mlrun/common/schemas/function.py +1 -1
- mlrun/common/schemas/hub.py +4 -9
- mlrun/common/schemas/memory_reports.py +2 -2
- mlrun/common/schemas/model_monitoring/grafana.py +4 -4
- mlrun/common/schemas/model_monitoring/model_endpoints.py +14 -15
- mlrun/common/schemas/notification.py +4 -4
- mlrun/common/schemas/object.py +2 -2
- mlrun/common/schemas/pipeline.py +1 -1
- mlrun/common/schemas/project.py +3 -3
- mlrun/common/schemas/runtime_resource.py +8 -12
- mlrun/common/schemas/schedule.py +3 -3
- mlrun/common/schemas/tag.py +1 -2
- mlrun/common/schemas/workflow.py +2 -2
- mlrun/config.py +8 -4
- mlrun/data_types/to_pandas.py +1 -3
- mlrun/datastore/base.py +0 -28
- mlrun/datastore/datastore_profile.py +9 -9
- mlrun/datastore/filestore.py +0 -1
- mlrun/datastore/google_cloud_storage.py +1 -1
- mlrun/datastore/sources.py +7 -11
- mlrun/datastore/spark_utils.py +1 -2
- mlrun/datastore/targets.py +31 -31
- mlrun/datastore/utils.py +4 -6
- mlrun/datastore/v3io.py +70 -46
- mlrun/db/base.py +22 -23
- mlrun/db/httpdb.py +34 -34
- mlrun/db/nopdb.py +19 -19
- mlrun/errors.py +1 -1
- mlrun/execution.py +4 -4
- mlrun/feature_store/api.py +20 -21
- mlrun/feature_store/common.py +1 -1
- mlrun/feature_store/feature_set.py +28 -32
- mlrun/feature_store/feature_vector.py +24 -27
- mlrun/feature_store/retrieval/base.py +7 -7
- mlrun/feature_store/retrieval/conversion.py +2 -4
- mlrun/feature_store/steps.py +7 -15
- mlrun/features.py +5 -7
- mlrun/frameworks/_common/artifacts_library.py +9 -9
- mlrun/frameworks/_common/mlrun_interface.py +5 -5
- mlrun/frameworks/_common/model_handler.py +48 -48
- mlrun/frameworks/_common/plan.py +2 -3
- mlrun/frameworks/_common/producer.py +3 -4
- mlrun/frameworks/_common/utils.py +5 -5
- mlrun/frameworks/_dl_common/loggers/logger.py +6 -7
- mlrun/frameworks/_dl_common/loggers/mlrun_logger.py +9 -9
- mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +16 -35
- mlrun/frameworks/_ml_common/artifacts_library.py +1 -2
- mlrun/frameworks/_ml_common/loggers/logger.py +3 -4
- mlrun/frameworks/_ml_common/loggers/mlrun_logger.py +4 -5
- mlrun/frameworks/_ml_common/model_handler.py +24 -24
- mlrun/frameworks/_ml_common/pkl_model_server.py +2 -2
- mlrun/frameworks/_ml_common/plan.py +1 -1
- mlrun/frameworks/_ml_common/plans/calibration_curve_plan.py +2 -3
- mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +2 -3
- mlrun/frameworks/_ml_common/plans/dataset_plan.py +3 -3
- mlrun/frameworks/_ml_common/plans/feature_importance_plan.py +3 -3
- mlrun/frameworks/_ml_common/plans/roc_curve_plan.py +4 -4
- mlrun/frameworks/_ml_common/utils.py +4 -4
- mlrun/frameworks/auto_mlrun/auto_mlrun.py +7 -7
- mlrun/frameworks/huggingface/model_server.py +4 -4
- mlrun/frameworks/lgbm/__init__.py +32 -32
- mlrun/frameworks/lgbm/callbacks/logging_callback.py +4 -5
- mlrun/frameworks/lgbm/callbacks/mlrun_logging_callback.py +4 -5
- mlrun/frameworks/lgbm/mlrun_interfaces/booster_mlrun_interface.py +1 -3
- mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +6 -6
- mlrun/frameworks/lgbm/model_handler.py +9 -9
- mlrun/frameworks/lgbm/model_server.py +6 -6
- mlrun/frameworks/lgbm/utils.py +5 -5
- mlrun/frameworks/onnx/dataset.py +8 -8
- mlrun/frameworks/onnx/mlrun_interface.py +3 -3
- mlrun/frameworks/onnx/model_handler.py +6 -6
- mlrun/frameworks/onnx/model_server.py +7 -7
- mlrun/frameworks/parallel_coordinates.py +2 -2
- mlrun/frameworks/pytorch/__init__.py +16 -16
- mlrun/frameworks/pytorch/callbacks/callback.py +4 -5
- mlrun/frameworks/pytorch/callbacks/logging_callback.py +17 -17
- mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +11 -11
- mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +23 -29
- mlrun/frameworks/pytorch/callbacks_handler.py +38 -38
- mlrun/frameworks/pytorch/mlrun_interface.py +20 -20
- mlrun/frameworks/pytorch/model_handler.py +17 -17
- mlrun/frameworks/pytorch/model_server.py +7 -7
- mlrun/frameworks/sklearn/__init__.py +12 -12
- mlrun/frameworks/sklearn/estimator.py +4 -4
- mlrun/frameworks/sklearn/metrics_library.py +14 -14
- mlrun/frameworks/sklearn/mlrun_interface.py +3 -6
- mlrun/frameworks/sklearn/model_handler.py +2 -2
- mlrun/frameworks/tf_keras/__init__.py +5 -5
- mlrun/frameworks/tf_keras/callbacks/logging_callback.py +14 -14
- mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +11 -11
- mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +19 -23
- mlrun/frameworks/tf_keras/mlrun_interface.py +7 -9
- mlrun/frameworks/tf_keras/model_handler.py +14 -14
- mlrun/frameworks/tf_keras/model_server.py +6 -6
- mlrun/frameworks/xgboost/__init__.py +12 -12
- mlrun/frameworks/xgboost/model_handler.py +6 -6
- mlrun/k8s_utils.py +4 -5
- mlrun/kfpops.py +2 -2
- mlrun/launcher/base.py +10 -10
- mlrun/launcher/local.py +8 -8
- mlrun/launcher/remote.py +7 -7
- mlrun/lists.py +3 -4
- mlrun/model.py +205 -55
- mlrun/model_monitoring/api.py +21 -24
- mlrun/model_monitoring/application.py +4 -4
- mlrun/model_monitoring/batch.py +17 -17
- mlrun/model_monitoring/controller.py +2 -1
- mlrun/model_monitoring/features_drift_table.py +44 -31
- mlrun/model_monitoring/prometheus.py +1 -4
- mlrun/model_monitoring/stores/kv_model_endpoint_store.py +11 -13
- mlrun/model_monitoring/stores/model_endpoint_store.py +9 -11
- mlrun/model_monitoring/stores/models/__init__.py +2 -2
- mlrun/model_monitoring/stores/sql_model_endpoint_store.py +11 -13
- mlrun/model_monitoring/stream_processing.py +16 -34
- mlrun/model_monitoring/tracking_policy.py +2 -1
- mlrun/package/__init__.py +6 -6
- mlrun/package/context_handler.py +5 -5
- mlrun/package/packager.py +7 -7
- mlrun/package/packagers/default_packager.py +6 -6
- mlrun/package/packagers/numpy_packagers.py +15 -15
- mlrun/package/packagers/pandas_packagers.py +5 -5
- mlrun/package/packagers/python_standard_library_packagers.py +10 -10
- mlrun/package/packagers_manager.py +18 -23
- mlrun/package/utils/_formatter.py +4 -4
- mlrun/package/utils/_pickler.py +2 -2
- mlrun/package/utils/_supported_format.py +4 -4
- mlrun/package/utils/log_hint_utils.py +2 -2
- mlrun/package/utils/type_hint_utils.py +4 -9
- mlrun/platforms/other.py +1 -2
- mlrun/projects/operations.py +5 -5
- mlrun/projects/pipelines.py +9 -9
- mlrun/projects/project.py +58 -46
- mlrun/render.py +1 -1
- mlrun/run.py +9 -9
- mlrun/runtimes/__init__.py +7 -4
- mlrun/runtimes/base.py +20 -23
- mlrun/runtimes/constants.py +5 -5
- mlrun/runtimes/daskjob.py +8 -8
- mlrun/runtimes/databricks_job/databricks_cancel_task.py +1 -1
- mlrun/runtimes/databricks_job/databricks_runtime.py +7 -7
- mlrun/runtimes/function_reference.py +1 -1
- mlrun/runtimes/local.py +1 -1
- mlrun/runtimes/mpijob/abstract.py +1 -2
- mlrun/runtimes/nuclio/__init__.py +20 -0
- mlrun/runtimes/{function.py → nuclio/function.py} +15 -16
- mlrun/runtimes/{nuclio.py → nuclio/nuclio.py} +6 -6
- mlrun/runtimes/{serving.py → nuclio/serving.py} +13 -12
- mlrun/runtimes/pod.py +95 -48
- mlrun/runtimes/remotesparkjob.py +1 -1
- mlrun/runtimes/sparkjob/spark3job.py +50 -33
- mlrun/runtimes/utils.py +1 -2
- mlrun/secrets.py +3 -3
- mlrun/serving/remote.py +0 -4
- mlrun/serving/routers.py +6 -6
- mlrun/serving/server.py +4 -4
- mlrun/serving/states.py +29 -0
- mlrun/serving/utils.py +3 -3
- mlrun/serving/v1_serving.py +6 -7
- mlrun/serving/v2_serving.py +50 -8
- mlrun/track/tracker_manager.py +3 -3
- mlrun/track/trackers/mlflow_tracker.py +1 -2
- mlrun/utils/async_http.py +5 -7
- mlrun/utils/azure_vault.py +1 -1
- mlrun/utils/clones.py +1 -2
- mlrun/utils/condition_evaluator.py +3 -3
- mlrun/utils/db.py +3 -3
- mlrun/utils/helpers.py +37 -119
- mlrun/utils/http.py +1 -4
- mlrun/utils/logger.py +49 -14
- mlrun/utils/notifications/notification/__init__.py +3 -3
- mlrun/utils/notifications/notification/base.py +2 -2
- mlrun/utils/notifications/notification/ipython.py +1 -1
- mlrun/utils/notifications/notification_pusher.py +8 -14
- mlrun/utils/retryer.py +207 -0
- mlrun/utils/singleton.py +1 -1
- mlrun/utils/v3io_clients.py +2 -3
- mlrun/utils/version/version.json +2 -2
- mlrun/utils/version/version.py +2 -6
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/METADATA +9 -9
- mlrun-1.7.0rc2.dist-info/RECORD +315 -0
- mlrun-1.6.0rc35.dist-info/RECORD +0 -313
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/LICENSE +0 -0
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/WHEEL +0 -0
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/entry_points.txt +0 -0
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/top_level.txt +0 -0
mlrun/common/schemas/model_monitoring/model_endpoints.py
CHANGED
@@ -15,8 +15,7 @@
 
 import enum
 import json
-import
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from pydantic import BaseModel, Field, validator
 from pydantic.main import Extra
@@ -48,7 +47,7 @@ class ModelEndpointMetadata(BaseModel):
         extra = Extra.allow
 
     @classmethod
-    def from_flat_dict(cls, endpoint_dict: dict, json_parse_values:
+    def from_flat_dict(cls, endpoint_dict: dict, json_parse_values: list = None):
         """Create a `ModelEndpointMetadata` object from an endpoint dictionary
 
         :param endpoint_dict: Model endpoint dictionary.
@@ -71,8 +70,8 @@ class ModelEndpointSpec(ObjectSpec):
     model: Optional[str] = ""  # <model_name>:<version>
     model_class: Optional[str] = ""
     model_uri: Optional[str] = ""
-    feature_names: Optional[
-    label_names: Optional[
+    feature_names: Optional[list[str]] = []
+    label_names: Optional[list[str]] = []
     stream_path: Optional[str] = ""
     algorithm: Optional[str] = ""
     monitor_configuration: Optional[dict] = {}
@@ -80,7 +79,7 @@ class ModelEndpointSpec(ObjectSpec):
     monitoring_mode: Optional[ModelMonitoringMode] = ModelMonitoringMode.disabled.value
 
     @classmethod
-    def from_flat_dict(cls, endpoint_dict: dict, json_parse_values:
+    def from_flat_dict(cls, endpoint_dict: dict, json_parse_values: list = None):
         """Create a `ModelEndpointSpec` object from an endpoint dictionary
 
         :param endpoint_dict: Model endpoint dictionary.
@@ -123,8 +122,8 @@ class ModelEndpointSpec(ObjectSpec):
 
 
 class Histogram(BaseModel):
-    buckets:
-    counts:
+    buckets: list[float]
+    counts: list[int]
 
 
 class FeatureValues(BaseModel):
@@ -175,15 +174,15 @@ class ModelEndpointStatus(ObjectStatus):
     error_count: Optional[int] = 0
     drift_status: Optional[str] = ""
     drift_measures: Optional[dict] = {}
-    metrics: Optional[
+    metrics: Optional[dict[str, dict[str, Any]]] = {
         EventKeyMetrics.GENERIC: {
             EventLiveStats.LATENCY_AVG_1H: 0,
             EventLiveStats.PREDICTIONS_PER_SECOND: 0,
         }
     }
-    features: Optional[
-    children: Optional[
-    children_uids: Optional[
+    features: Optional[list[Features]] = []
+    children: Optional[list[str]] = []
+    children_uids: Optional[list[str]] = []
     endpoint_type: Optional[EndpointType] = EndpointType.NODE_EP
     monitoring_feature_set_uri: Optional[str] = ""
     state: Optional[str] = ""
@@ -192,7 +191,7 @@ class ModelEndpointStatus(ObjectStatus):
         extra = Extra.allow
 
     @classmethod
-    def from_flat_dict(cls, endpoint_dict: dict, json_parse_values:
+    def from_flat_dict(cls, endpoint_dict: dict, json_parse_values: list = None):
         """Create a `ModelEndpointStatus` object from an endpoint dictionary
 
         :param endpoint_dict: Model endpoint dictionary.
@@ -290,13 +289,13 @@ class ModelEndpoint(BaseModel):
 
 
 class ModelEndpointList(BaseModel):
-    endpoints:
+    endpoints: list[ModelEndpoint] = []
 
 
 def _mapping_attributes(
     base_model: BaseModel,
     flattened_dictionary: dict,
-    json_parse_values:
+    json_parse_values: list = None,
 ):
     """Generate a `BaseModel` object with the provided dictionary attributes.
 
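Most of the schema changes in this release, here and in the other `mlrun/common/schemas` files below, follow one pattern: `typing.List`/`typing.Dict` annotations are replaced with the built-in `list`/`dict` generics (PEP 585, Python 3.9+), so most `from typing import ...` lines shrink or disappear. A minimal before/after sketch on a made-up pydantic model (not one of the MLRun classes):

```python
from typing import Any, Optional

from pydantic import BaseModel


class ExampleStatus(BaseModel):
    # the 1.6.x style needed `from typing import Dict, List` and wrote:
    #   metrics: Optional[Dict[str, Any]] = {}
    #   children: Optional[List[str]] = []
    metrics: Optional[dict[str, Any]] = {}
    children: Optional[list[str]] = []


status = ExampleStatus(children=["endpoint-a", "endpoint-b"])
print(status.children, status.metrics)
```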
mlrun/common/schemas/notification.py
CHANGED
@@ -54,14 +54,14 @@ class Notification(pydantic.BaseModel):
     name: str
     message: str
     severity: NotificationSeverity
-    when:
+    when: list[str]
     condition: str
-    params:
+    params: dict[str, typing.Any] = None
     status: NotificationStatus = None
     sent_time: typing.Union[str, datetime.datetime] = None
-    secret_params: typing.Optional[
+    secret_params: typing.Optional[dict[str, typing.Any]] = None
     reason: typing.Optional[str] = None
 
 
 class SetNotificationRequest(pydantic.BaseModel):
-    notifications:
+    notifications: list[Notification] = None
mlrun/common/schemas/object.py
CHANGED
@@ -13,7 +13,7 @@
 # limitations under the License.
 #
 from datetime import datetime
-from typing import
+from typing import Optional
 
 from pydantic import BaseModel, Extra
 
@@ -60,7 +60,7 @@ class ObjectRecord(BaseModel):
     project: str
     uid: str
     updated: Optional[datetime] = None
-    labels:
+    labels: list[LabelRecord]
     # state is extracted from the full status dict to enable queries
     state: Optional[str] = None
     full_object: Optional[dict] = None
mlrun/common/schemas/pipeline.py
CHANGED
@@ -34,6 +34,6 @@ class PipelinesPagination(str):
 
 class PipelinesOutput(pydantic.BaseModel):
     # use the format query param to control what is returned
-    runs:
+    runs: list[typing.Union[dict, str]]
     total_size: int
     next_page_token: typing.Optional[str]
mlrun/common/schemas/project.py
CHANGED
@@ -83,7 +83,7 @@ class ProjectSpec(pydantic.BaseModel):
     subpath: typing.Optional[str] = None
     origin_url: typing.Optional[str] = None
     desired_state: typing.Optional[ProjectDesiredState] = ProjectDesiredState.online
-    custom_packagers: typing.Optional[
+    custom_packagers: typing.Optional[list[tuple[str, bool]]] = None
     default_image: typing.Optional[str] = None
 
     class Config:
@@ -127,8 +127,8 @@ class ProjectsOutput(pydantic.BaseModel):
     # union by the definition order. Therefore we can't currently add generic dict for all leader formats, but we need
     # to add a specific classes for them. it's frustrating but couldn't find other workaround, see:
     # https://github.com/samuelcolvin/pydantic/issues/1423, https://github.com/samuelcolvin/pydantic/issues/619
-    projects:
+    projects: list[typing.Union[Project, str, ProjectSummary, IguazioProject]]
 
 
 class ProjectSummariesOutput(pydantic.BaseModel):
-    project_summaries:
+    project_summaries: list[ProjectSummary]
mlrun/common/schemas/runtime_resource.py
CHANGED
@@ -26,15 +26,15 @@ class ListRuntimeResourcesGroupByField(mlrun.common.types.StrEnum):
 
 class RuntimeResource(pydantic.BaseModel):
     name: str
-    labels:
-    status: typing.Optional[
+    labels: dict[str, str] = {}
+    status: typing.Optional[dict]
 
 
 class RuntimeResources(pydantic.BaseModel):
-    crd_resources:
-    pod_resources:
+    crd_resources: list[RuntimeResource] = []
+    pod_resources: list[RuntimeResource] = []
     # only for dask runtime
-    service_resources: typing.Optional[
+    service_resources: typing.Optional[list[RuntimeResource]] = None
 
     class Config:
         extra = pydantic.Extra.allow
@@ -45,14 +45,10 @@ class KindRuntimeResources(pydantic.BaseModel):
     resources: RuntimeResources
 
 
-RuntimeResourcesOutput =
+RuntimeResourcesOutput = list[KindRuntimeResources]
 
 
 # project name -> job uid -> runtime resources
-GroupedByJobRuntimeResourcesOutput =
-    str, typing.Dict[str, RuntimeResources]
-]
+GroupedByJobRuntimeResourcesOutput = dict[str, dict[str, RuntimeResources]]
 # project name -> kind -> runtime resources
-GroupedByProjectRuntimeResourcesOutput =
-    str, typing.Dict[str, RuntimeResources]
-]
+GroupedByProjectRuntimeResourcesOutput = dict[str, dict[str, RuntimeResources]]
mlrun/common/schemas/schedule.py
CHANGED
@@ -13,7 +13,7 @@
 # limitations under the License.
 #
 from datetime import datetime
-from typing import Any,
+from typing import Any, Literal, Optional, Union
 
 from pydantic import BaseModel
 
@@ -119,7 +119,7 @@ class ScheduleRecord(ScheduleInput):
     project: str
     last_run_uri: Optional[str]
     state: Optional[str]
-    labels: Optional[
+    labels: Optional[list[LabelRecord]]
     next_run_time: Optional[datetime]
 
     class Config:
@@ -135,7 +135,7 @@ class ScheduleOutput(ScheduleRecord):
 
 
 class SchedulesOutput(BaseModel):
-    schedules:
+    schedules: list[ScheduleOutput]
 
 
 class ScheduleIdentifier(BaseModel):
mlrun/common/schemas/tag.py
CHANGED
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import typing
 
 import pydantic
 
@@ -29,4 +28,4 @@ class TagObjects(pydantic.BaseModel):
 
     kind: str
     # TODO: Add more types to the list for new supported tagged objects
-    identifiers:
+    identifiers: list[ArtifactIdentifier]
mlrun/common/schemas/workflow.py
CHANGED
@@ -36,12 +36,12 @@ class WorkflowSpec(pydantic.BaseModel):
 
 class WorkflowRequest(pydantic.BaseModel):
     spec: typing.Optional[WorkflowSpec] = None
-    arguments: typing.Optional[
+    arguments: typing.Optional[dict] = None
     artifact_path: typing.Optional[str] = None
     source: typing.Optional[str] = None
     run_name: typing.Optional[str] = None
     namespace: typing.Optional[str] = None
-    notifications: typing.Optional[
+    notifications: typing.Optional[list[Notification]] = None
 
 
 class WorkflowResponse(pydantic.BaseModel):
mlrun/config.py
CHANGED
@@ -17,7 +17,7 @@ Configuration system.
 Configuration can be in either a configuration file specified by
 MLRUN_CONFIG_FILE environment variable or by environment variables.
 
-Environment variables are in the format "
+Environment variables are in the format "MLRUN_HTTPDB__PORT=8080". This will be
 mapped to config.httpdb.port. Values should be in JSON format.
 """
 
@@ -306,7 +306,11 @@ default_config = {
                 # default is 16MB, max 1G, for more info https://dev.mysql.com/doc/refman/8.0/en/packet-too-large.html
                 "max_allowed_packet": 64000000,  # 64MB
             },
-            #
+            # tests connections for liveness upon each checkout
+            "connections_pool_pre_ping": True,
+            # this setting causes the pool to recycle connections after the given number of seconds has passed
+            "connections_pool_recycle": 60 * 60,
+            # None defaults to httpdb.max_workers
             "connections_pool_size": None,
             "connections_pool_max_overflow": None,
            # below is a db-specific configuration
@@ -344,7 +348,7 @@ default_config = {
            # ---------------------------------------------------------------------
            # Note: adding a mode requires special handling on
            # - mlrun.runtimes.constants.NuclioIngressAddTemplatedIngressModes
-            # - mlrun.runtimes.function.enrich_function_with_ingress
+            # - mlrun.runtimes.nuclio.function.enrich_function_with_ingress
            "add_templated_ingress_host_mode": "never",
            "explicit_ack": "enabled",
        },
@@ -1122,7 +1126,7 @@ class Config:
             ver in mlrun.mlconf.ce.mode for ver in ["lite", "full"]
         )
 
-    def get_s3_storage_options(self) ->
+    def get_s3_storage_options(self) -> dict[str, typing.Any]:
         """
         Generate storage options dictionary as required for handling S3 path in fsspec. The model monitoring stream
         graph uses this method for generating the storage options for S3 parquet target path.
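The two new `httpdb.db` keys correspond to SQLAlchemy's standard connection-pool options (`pool_pre_ping`, `pool_recycle`). A rough sketch of what those options mean when an engine is created; the URL and the direct `create_engine` call are illustrative stand-ins, not MLRun's actual engine setup:

```python
import sqlalchemy

engine = sqlalchemy.create_engine(
    "sqlite:///example.db",  # placeholder URL; MLRun would use its configured dsn
    pool_pre_ping=True,      # test each pooled connection for liveness on checkout
    pool_recycle=60 * 60,    # replace connections older than one hour
)

with engine.connect() as conn:
    print(conn.execute(sqlalchemy.text("select 1")).scalar())
```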
mlrun/data_types/to_pandas.py
CHANGED
@@ -94,9 +94,7 @@ def toPandas(spark_df):
         )
 
         # Rename columns to avoid duplicated column names.
-        tmp_column_names = [
-            "col_{}".format(i) for i in range(len(spark_df.columns))
-        ]
+        tmp_column_names = [f"col_{i}" for i in range(len(spark_df.columns))]
         self_destruct = spark_df.sql_ctx._conf.arrowPySparkSelfDestructEnabled()
         batches = spark_df.toDF(*tmp_column_names)._collect_as_arrow(
             split_batches=self_destruct
mlrun/datastore/base.py
CHANGED
@@ -654,34 +654,6 @@ def http_get(url, headers=None, auth=None):
     return response.content
 
 
-def http_head(url, headers=None, auth=None):
-    try:
-        response = requests.head(url, headers=headers, auth=auth, verify=verify_ssl)
-    except OSError as exc:
-        raise OSError(f"error: cannot connect to {url}: {err_to_str(exc)}")
-
-    mlrun.errors.raise_for_status(response)
-
-    return response.headers
-
-
-def http_put(url, data, headers=None, auth=None, session=None):
-    try:
-        put_api = session.put if session else requests.put
-        response = put_api(
-            url, data=data, headers=headers, auth=auth, verify=verify_ssl
-        )
-    except OSError as exc:
-        raise OSError(f"error: cannot connect to {url}: {err_to_str(exc)}") from exc
-
-    mlrun.errors.raise_for_status(response)
-
-
-def http_upload(url, file_path, headers=None, auth=None):
-    with open(file_path, "rb") as data:
-        http_put(url, data, headers, auth)
-
-
 class HttpStore(DataStore):
     def __init__(self, parent, schema, name, endpoint="", secrets: dict = None):
         super().__init__(parent, name, schema, endpoint, secrets)
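The module-level `http_head`, `http_put`, and `http_upload` helpers are removed in this version; the same ground is covered by calling `requests` directly. A hedged sketch of a plain-`requests` equivalent (the URL is a placeholder, and this is not an MLRun API):

```python
import requests

url = "https://example.com/artifact.bin"  # placeholder

# roughly what the removed http_head helper returned: the response headers
response = requests.head(url, timeout=10, allow_redirects=True)
print(response.status_code, response.headers.get("Content-Length"))

# the removed http_upload/http_put pair boils down to:
#   with open(local_path, "rb") as data:
#       requests.put(url, data=data, timeout=10).raise_for_status()
```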
mlrun/datastore/datastore_profile.py
CHANGED
@@ -30,7 +30,7 @@ from ..secrets import get_secret_or_env
 class DatastoreProfile(pydantic.BaseModel):
     type: str
     name: str
-    _private_attributes:
+    _private_attributes: list = ()
 
     class Config:
         extra = pydantic.Extra.forbid
@@ -81,8 +81,8 @@ class DatastoreProfileKafkaTarget(DatastoreProfile):
     _private_attributes = "kwargs_private"
     bootstrap_servers: str
     topic: str
-    kwargs_public: typing.Optional[
-    kwargs_private: typing.Optional[
+    kwargs_public: typing.Optional[dict]
+    kwargs_private: typing.Optional[dict]
 
     def attributes(self):
         attributes = {"bootstrap_servers": self.bootstrap_servers}
@@ -96,15 +96,15 @@ class DatastoreProfileKafkaTarget(DatastoreProfile):
 class DatastoreProfileKafkaSource(DatastoreProfile):
     type: str = pydantic.Field("kafka_source")
     _private_attributes = ("kwargs_private", "sasl_user", "sasl_pass")
-    brokers: typing.Union[str,
-    topics: typing.Union[str,
+    brokers: typing.Union[str, list[str]]
+    topics: typing.Union[str, list[str]]
     group: typing.Optional[str] = "serving"
     initial_offset: typing.Optional[str] = "earliest"
-    partitions: typing.Optional[typing.Union[str,
+    partitions: typing.Optional[typing.Union[str, list[str]]]
     sasl_user: typing.Optional[str]
     sasl_pass: typing.Optional[str]
-    kwargs_public: typing.Optional[
-    kwargs_private: typing.Optional[
+    kwargs_public: typing.Optional[dict]
+    kwargs_private: typing.Optional[dict]
 
     def attributes(self):
         attributes = {}
@@ -227,7 +227,7 @@ class DatastoreProfileGCS(DatastoreProfile):
     type: str = pydantic.Field("gcs")
     _private_attributes = ("gcp_credentials",)
     credentials_path: typing.Optional[str] = None  # path to file.
-    gcp_credentials: typing.Optional[typing.Union[str,
+    gcp_credentials: typing.Optional[typing.Union[str, dict]] = None
 
     @pydantic.validator("gcp_credentials", pre=True, always=True)
     def convert_dict_to_json(cls, v):
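For orientation, a usage sketch of the `DatastoreProfileKafkaSource` model shown above, built only from the fields visible in this diff; broker and topic values are placeholders, and after this change either a single string or a `list[str]` is accepted for `brokers`/`topics`:

```python
from mlrun.datastore.datastore_profile import DatastoreProfileKafkaSource

profile = DatastoreProfileKafkaSource(
    name="my-kafka",                             # profile name, placeholder
    brokers=["broker-1:9092", "broker-2:9092"],  # list form
    topics="ingest-topic",                       # plain-string form
    group="serving",
    initial_offset="earliest",
)
print(profile)
```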
mlrun/datastore/filestore.py
CHANGED

mlrun/datastore/google_cloud_storage.py
CHANGED
@@ -138,7 +138,7 @@ class GoogleCloudStorageStore(DataStore):
         res = {"spark.hadoop.google.cloud.auth.service.account.enable": "true"}
         if isinstance(st["token"], str):
             # Token is a filename, read json from it
-            with open(st["token"]
+            with open(st["token"]) as file:
                 credentials = json.load(file)
         else:
             # Token is a dictionary, use it directly
mlrun/datastore/sources.py
CHANGED
@@ -17,7 +17,7 @@ import warnings
 from base64 import b64encode
 from copy import copy
 from datetime import datetime
-from typing import
+from typing import Optional, Union
 
 import pandas as pd
 import semver
@@ -170,10 +170,10 @@ class CSVSource(BaseSourceDriver):
         self,
         name: str = "",
         path: str = None,
-        attributes:
+        attributes: dict[str, str] = None,
         key_field: str = None,
         schedule: str = None,
-        parse_dates: Union[None, int, str,
+        parse_dates: Union[None, int, str, list[int], list[str]] = None,
         **kwargs,
     ):
         super().__init__(name, path, attributes, key_field, schedule=schedule, **kwargs)
@@ -299,7 +299,7 @@ class ParquetSource(BaseSourceDriver):
         self,
         name: str = "",
         path: str = None,
-        attributes:
+        attributes: dict[str, str] = None,
         key_field: str = None,
         time_field: str = None,
         schedule: str = None,
@@ -800,7 +800,7 @@ class OnlineSource(BaseSourceDriver):
         self,
         name: str = None,
         path: str = None,
-        attributes:
+        attributes: dict[str, object] = None,
         key_field: str = None,
         time_field: str = None,
         workers: int = None,
@@ -848,8 +848,6 @@ class HttpSource(OnlineSource):
 
 
 class StreamSource(OnlineSource):
-    """Sets stream source for the flow. If stream doesn't exist it will create it"""
-
     kind = "v3ioStream"
 
     def __init__(
@@ -863,7 +861,7 @@ class StreamSource(OnlineSource):
         **kwargs,
     ):
         """
-        Sets stream source for the flow. If stream doesn't exist it will create it
+        Sets the stream source for the flow. If the stream doesn't exist it will create it.
 
         :param name: stream name. Default "stream"
         :param group: consumer group. Default "serving"
@@ -915,8 +913,6 @@ class StreamSource(OnlineSource):
 
 
 class KafkaSource(OnlineSource):
-    """Sets kafka source for the flow"""
-
     kind = "kafka"
 
     def __init__(
@@ -1047,7 +1043,7 @@ class SQLSource(BaseSourceDriver):
         db_url: str = None,
         table_name: str = None,
         spark_options: dict = None,
-        parse_dates:
+        parse_dates: list[str] = None,
         **kwargs,
     ):
         """
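A small usage sketch of the updated `CSVSource` signature, exercising `parse_dates` with the newly spelled `list[str]` form; the path and column name are placeholders:

```python
from mlrun.datastore.sources import CSVSource

source = CSVSource(
    name="events",
    path="v3io:///projects/demo/events.csv",  # placeholder path
    parse_dates=["timestamp"],  # a column index, a column name, or a list of either
)
print(source.kind, source.path)
```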
mlrun/datastore/spark_utils.py
CHANGED
@@ -12,12 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict
 
 import mlrun
 
 
-def spark_session_update_hadoop_options(session, spark_options) ->
+def spark_session_update_hadoop_options(session, spark_options) -> dict[str, str]:
     hadoop_conf = session.sparkContext._jsc.hadoopConfiguration()
     non_hadoop_spark_options = {}
 
mlrun/datastore/targets.py
CHANGED
@@ -19,7 +19,7 @@ import sys
 import time
 from collections import Counter
 from copy import copy
-from typing import Any,
+from typing import Any, Optional, Union
 from urllib.parse import urlparse
 
 import pandas as pd
@@ -215,9 +215,8 @@ def validate_target_list(targets):
     ]
     if target_types_requiring_name:
         raise mlrun.errors.MLRunInvalidArgumentError(
-            "Only one default name per target type is allowed (please
-
-            )
+            "Only one default name per target type is allowed (please "
+            f"specify name for {target_types_requiring_name} target)"
         )
 
     target_names_count = Counter(
@@ -232,9 +231,8 @@ def validate_target_list(targets):
 
     if targets_with_same_name:
         raise mlrun.errors.MLRunInvalidArgumentError(
-            "Each target must have a unique name (more than one target with
-
-            )
+            "Each target must have a unique name (more than one target with "
+            f"those names found {targets_with_same_name})"
         )
 
     no_path_target_types_count = Counter(
@@ -252,9 +250,8 @@ def validate_target_list(targets):
     ]
     if target_types_requiring_path:
         raise mlrun.errors.MLRunInvalidArgumentError(
-            "Only one default path per target type is allowed (please specify
-
-            )
+            "Only one default path per target type is allowed (please specify "
+            f"path for {target_types_requiring_path} target)"
         )
 
     target_paths_count = Counter(
@@ -269,9 +266,8 @@ def validate_target_list(targets):
 
     if targets_with_same_path:
         raise mlrun.errors.MLRunInvalidArgumentError(
-            "Each target must have a unique path (more than one target
-
-            )
+            "Each target must have a unique path (more than one target "
+            f"with those names found {targets_with_same_path})"
         )
 
 
@@ -390,17 +386,17 @@ class BaseStoreTarget(DataTargetBase):
         self,
         name: str = "",
         path=None,
-        attributes:
+        attributes: dict[str, str] = None,
         after_step=None,
         columns=None,
         partitioned: bool = False,
         key_bucketing_number: Optional[int] = None,
-        partition_cols: Optional[
+        partition_cols: Optional[list[str]] = None,
         time_partitioning_granularity: Optional[str] = None,
         max_events: Optional[int] = None,
         flush_after_seconds: Optional[int] = None,
-        storage_options:
-        schema:
+        storage_options: dict[str, str] = None,
+        schema: dict[str, Any] = None,
         credentials_prefix=None,
     ):
         super().__init__(
@@ -731,7 +727,7 @@ class BaseStoreTarget(DataTargetBase):
 
 
 class ParquetTarget(BaseStoreTarget):
-    """
+    """Parquet target storage driver, used to materialize feature set/vector data into parquet files.
 
     :param name: optional, target name. By default will be called ParquetTarget
     :param path: optional, Output path. Can be either a file or directory.
@@ -766,16 +762,16 @@ class ParquetTarget(BaseStoreTarget):
         self,
         name: str = "",
         path=None,
-        attributes:
+        attributes: dict[str, str] = None,
         after_step=None,
         columns=None,
         partitioned: bool = None,
         key_bucketing_number: Optional[int] = None,
-        partition_cols: Optional[
+        partition_cols: Optional[list[str]] = None,
         time_partitioning_granularity: Optional[str] = None,
         max_events: Optional[int] = 10000,
         flush_after_seconds: Optional[int] = 900,
-        storage_options:
+        storage_options: dict[str, str] = None,
     ):
         self.path = path
         if partitioned is None:
@@ -1652,24 +1648,24 @@ class SQLTarget(BaseStoreTarget):
         self,
         name: str = "",
         path=None,
-        attributes:
+        attributes: dict[str, str] = None,
         after_step=None,
         partitioned: bool = False,
         key_bucketing_number: Optional[int] = None,
-        partition_cols: Optional[
+        partition_cols: Optional[list[str]] = None,
         time_partitioning_granularity: Optional[str] = None,
         max_events: Optional[int] = None,
        flush_after_seconds: Optional[int] = None,
-        storage_options:
+        storage_options: dict[str, str] = None,
        db_url: str = None,
        table_name: str = None,
-        schema:
+        schema: dict[str, Any] = None,
        primary_key_column: str = "",
        if_exists: str = "append",
        create_table: bool = False,
        # create_according_to_data: bool = False,
        varchar_len: int = 50,
-        parse_dates:
+        parse_dates: list[str] = None,
     ):
         """
         Write to SqlDB as output target for a flow.
@@ -1815,7 +1811,7 @@ class SQLTarget(BaseStoreTarget):
 
         db_path, table_name, _, _, _, _ = self._parse_url()
         engine = sqlalchemy.create_engine(db_path)
-        parse_dates: Optional[
+        parse_dates: Optional[list[str]] = self.attributes.get("parse_dates")
         with engine.connect() as conn:
             query, parse_dates = _generate_sql_query_with_time_filter(
                 table_name=table_name,
@@ -1915,12 +1911,16 @@ class SQLTarget(BaseStoreTarget):
             # creat new table with the given name
             columns = []
             for col, col_type in self.schema.items():
-
-                if
-                    raise TypeError(
+                col_type_sql = TYPE_TO_SQL_TYPE.get(col_type)
+                if col_type_sql is None:
+                    raise TypeError(
+                        f"'{col_type}' unsupported type for column '{col}'"
+                    )
                 columns.append(
                     sqlalchemy.Column(
-                        col,
+                        col,
+                        col_type_sql,
+                        primary_key=(col in primary_key_for_check),
                     )
                 )
 
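The new `SQLTarget` column-building code looks each schema type up in a `TYPE_TO_SQL_TYPE` table and fails fast with a `TypeError` on unknown types. The sketch below reproduces that lookup-then-validate pattern with a stand-in mapping; it is not MLRun's actual `TYPE_TO_SQL_TYPE` table:

```python
import sqlalchemy

# stand-in mapping from Python types to SQLAlchemy column types (an assumption,
# not the mapping used by MLRun)
TYPE_TO_SQL_TYPE = {
    int: sqlalchemy.Integer,
    float: sqlalchemy.Float,
    str: sqlalchemy.String(50),
}


def build_columns(schema: dict, primary_keys: set) -> list:
    columns = []
    for col, col_type in schema.items():
        col_type_sql = TYPE_TO_SQL_TYPE.get(col_type)
        if col_type_sql is None:
            raise TypeError(f"'{col_type}' unsupported type for column '{col}'")
        columns.append(
            sqlalchemy.Column(col, col_type_sql, primary_key=(col in primary_keys))
        )
    return columns


print(build_columns({"id": int, "name": str}, primary_keys={"id"}))
```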