mlrun 1.7.2rc4__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (275)
  1. mlrun/__init__.py +26 -22
  2. mlrun/__main__.py +15 -16
  3. mlrun/alerts/alert.py +150 -15
  4. mlrun/api/schemas/__init__.py +1 -9
  5. mlrun/artifacts/__init__.py +2 -3
  6. mlrun/artifacts/base.py +62 -19
  7. mlrun/artifacts/dataset.py +17 -17
  8. mlrun/artifacts/document.py +454 -0
  9. mlrun/artifacts/manager.py +28 -18
  10. mlrun/artifacts/model.py +91 -59
  11. mlrun/artifacts/plots.py +2 -2
  12. mlrun/common/constants.py +8 -0
  13. mlrun/common/formatters/__init__.py +1 -0
  14. mlrun/common/formatters/artifact.py +1 -1
  15. mlrun/common/formatters/feature_set.py +2 -0
  16. mlrun/common/formatters/function.py +1 -0
  17. mlrun/{model_monitoring/db/stores/v3io_kv/__init__.py → common/formatters/model_endpoint.py} +17 -0
  18. mlrun/common/formatters/pipeline.py +1 -2
  19. mlrun/common/formatters/project.py +9 -0
  20. mlrun/common/model_monitoring/__init__.py +0 -5
  21. mlrun/common/model_monitoring/helpers.py +12 -62
  22. mlrun/common/runtimes/constants.py +25 -4
  23. mlrun/common/schemas/__init__.py +9 -5
  24. mlrun/common/schemas/alert.py +114 -19
  25. mlrun/common/schemas/api_gateway.py +3 -3
  26. mlrun/common/schemas/artifact.py +22 -9
  27. mlrun/common/schemas/auth.py +8 -4
  28. mlrun/common/schemas/background_task.py +7 -7
  29. mlrun/common/schemas/client_spec.py +4 -4
  30. mlrun/common/schemas/clusterization_spec.py +2 -2
  31. mlrun/common/schemas/common.py +53 -3
  32. mlrun/common/schemas/constants.py +15 -0
  33. mlrun/common/schemas/datastore_profile.py +1 -1
  34. mlrun/common/schemas/feature_store.py +9 -9
  35. mlrun/common/schemas/frontend_spec.py +4 -4
  36. mlrun/common/schemas/function.py +10 -10
  37. mlrun/common/schemas/hub.py +1 -1
  38. mlrun/common/schemas/k8s.py +3 -3
  39. mlrun/common/schemas/memory_reports.py +3 -3
  40. mlrun/common/schemas/model_monitoring/__init__.py +4 -8
  41. mlrun/common/schemas/model_monitoring/constants.py +127 -46
  42. mlrun/common/schemas/model_monitoring/grafana.py +18 -12
  43. mlrun/common/schemas/model_monitoring/model_endpoints.py +154 -160
  44. mlrun/common/schemas/notification.py +24 -3
  45. mlrun/common/schemas/object.py +1 -1
  46. mlrun/common/schemas/pagination.py +4 -4
  47. mlrun/common/schemas/partition.py +142 -0
  48. mlrun/common/schemas/pipeline.py +3 -3
  49. mlrun/common/schemas/project.py +26 -18
  50. mlrun/common/schemas/runs.py +3 -3
  51. mlrun/common/schemas/runtime_resource.py +5 -5
  52. mlrun/common/schemas/schedule.py +1 -1
  53. mlrun/common/schemas/secret.py +1 -1
  54. mlrun/{model_monitoring/db/stores/sqldb/__init__.py → common/schemas/serving.py} +10 -1
  55. mlrun/common/schemas/tag.py +3 -3
  56. mlrun/common/schemas/workflow.py +6 -5
  57. mlrun/common/types.py +1 -0
  58. mlrun/config.py +157 -89
  59. mlrun/data_types/__init__.py +5 -3
  60. mlrun/data_types/infer.py +13 -3
  61. mlrun/data_types/spark.py +2 -1
  62. mlrun/datastore/__init__.py +59 -18
  63. mlrun/datastore/alibaba_oss.py +4 -1
  64. mlrun/datastore/azure_blob.py +4 -1
  65. mlrun/datastore/base.py +19 -24
  66. mlrun/datastore/datastore.py +10 -4
  67. mlrun/datastore/datastore_profile.py +178 -45
  68. mlrun/datastore/dbfs_store.py +4 -1
  69. mlrun/datastore/filestore.py +4 -1
  70. mlrun/datastore/google_cloud_storage.py +4 -1
  71. mlrun/datastore/hdfs.py +4 -1
  72. mlrun/datastore/inmem.py +4 -1
  73. mlrun/datastore/redis.py +4 -1
  74. mlrun/datastore/s3.py +14 -3
  75. mlrun/datastore/sources.py +89 -92
  76. mlrun/datastore/store_resources.py +7 -4
  77. mlrun/datastore/storeytargets.py +51 -16
  78. mlrun/datastore/targets.py +38 -31
  79. mlrun/datastore/utils.py +87 -4
  80. mlrun/datastore/v3io.py +4 -1
  81. mlrun/datastore/vectorstore.py +291 -0
  82. mlrun/datastore/wasbfs/fs.py +13 -12
  83. mlrun/db/base.py +286 -100
  84. mlrun/db/httpdb.py +1562 -490
  85. mlrun/db/nopdb.py +250 -83
  86. mlrun/errors.py +6 -2
  87. mlrun/execution.py +194 -50
  88. mlrun/feature_store/__init__.py +2 -10
  89. mlrun/feature_store/api.py +20 -458
  90. mlrun/feature_store/common.py +9 -9
  91. mlrun/feature_store/feature_set.py +20 -18
  92. mlrun/feature_store/feature_vector.py +105 -479
  93. mlrun/feature_store/feature_vector_utils.py +466 -0
  94. mlrun/feature_store/retrieval/base.py +15 -11
  95. mlrun/feature_store/retrieval/job.py +2 -1
  96. mlrun/feature_store/retrieval/storey_merger.py +1 -1
  97. mlrun/feature_store/steps.py +3 -3
  98. mlrun/features.py +30 -13
  99. mlrun/frameworks/__init__.py +1 -2
  100. mlrun/frameworks/_common/__init__.py +1 -2
  101. mlrun/frameworks/_common/artifacts_library.py +2 -2
  102. mlrun/frameworks/_common/mlrun_interface.py +10 -6
  103. mlrun/frameworks/_common/model_handler.py +31 -31
  104. mlrun/frameworks/_common/producer.py +3 -1
  105. mlrun/frameworks/_dl_common/__init__.py +1 -2
  106. mlrun/frameworks/_dl_common/loggers/__init__.py +1 -2
  107. mlrun/frameworks/_dl_common/loggers/mlrun_logger.py +4 -4
  108. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +3 -3
  109. mlrun/frameworks/_ml_common/__init__.py +1 -2
  110. mlrun/frameworks/_ml_common/loggers/__init__.py +1 -2
  111. mlrun/frameworks/_ml_common/model_handler.py +21 -21
  112. mlrun/frameworks/_ml_common/plans/__init__.py +1 -2
  113. mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +3 -1
  114. mlrun/frameworks/_ml_common/plans/dataset_plan.py +3 -3
  115. mlrun/frameworks/_ml_common/plans/roc_curve_plan.py +4 -4
  116. mlrun/frameworks/auto_mlrun/__init__.py +1 -2
  117. mlrun/frameworks/auto_mlrun/auto_mlrun.py +22 -15
  118. mlrun/frameworks/huggingface/__init__.py +1 -2
  119. mlrun/frameworks/huggingface/model_server.py +9 -9
  120. mlrun/frameworks/lgbm/__init__.py +47 -44
  121. mlrun/frameworks/lgbm/callbacks/__init__.py +1 -2
  122. mlrun/frameworks/lgbm/callbacks/logging_callback.py +4 -2
  123. mlrun/frameworks/lgbm/callbacks/mlrun_logging_callback.py +4 -2
  124. mlrun/frameworks/lgbm/mlrun_interfaces/__init__.py +1 -2
  125. mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +5 -5
  126. mlrun/frameworks/lgbm/model_handler.py +15 -11
  127. mlrun/frameworks/lgbm/model_server.py +11 -7
  128. mlrun/frameworks/lgbm/utils.py +2 -2
  129. mlrun/frameworks/onnx/__init__.py +1 -2
  130. mlrun/frameworks/onnx/dataset.py +3 -3
  131. mlrun/frameworks/onnx/mlrun_interface.py +2 -2
  132. mlrun/frameworks/onnx/model_handler.py +7 -5
  133. mlrun/frameworks/onnx/model_server.py +8 -6
  134. mlrun/frameworks/parallel_coordinates.py +11 -11
  135. mlrun/frameworks/pytorch/__init__.py +22 -23
  136. mlrun/frameworks/pytorch/callbacks/__init__.py +1 -2
  137. mlrun/frameworks/pytorch/callbacks/callback.py +2 -1
  138. mlrun/frameworks/pytorch/callbacks/logging_callback.py +15 -8
  139. mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +19 -12
  140. mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +22 -15
  141. mlrun/frameworks/pytorch/callbacks_handler.py +36 -30
  142. mlrun/frameworks/pytorch/mlrun_interface.py +17 -17
  143. mlrun/frameworks/pytorch/model_handler.py +21 -17
  144. mlrun/frameworks/pytorch/model_server.py +13 -9
  145. mlrun/frameworks/sklearn/__init__.py +19 -18
  146. mlrun/frameworks/sklearn/estimator.py +2 -2
  147. mlrun/frameworks/sklearn/metric.py +3 -3
  148. mlrun/frameworks/sklearn/metrics_library.py +8 -6
  149. mlrun/frameworks/sklearn/mlrun_interface.py +3 -2
  150. mlrun/frameworks/sklearn/model_handler.py +4 -3
  151. mlrun/frameworks/tf_keras/__init__.py +11 -12
  152. mlrun/frameworks/tf_keras/callbacks/__init__.py +1 -2
  153. mlrun/frameworks/tf_keras/callbacks/logging_callback.py +17 -14
  154. mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +15 -12
  155. mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +21 -18
  156. mlrun/frameworks/tf_keras/model_handler.py +17 -13
  157. mlrun/frameworks/tf_keras/model_server.py +12 -8
  158. mlrun/frameworks/xgboost/__init__.py +19 -18
  159. mlrun/frameworks/xgboost/model_handler.py +13 -9
  160. mlrun/k8s_utils.py +2 -5
  161. mlrun/launcher/base.py +3 -4
  162. mlrun/launcher/client.py +2 -2
  163. mlrun/launcher/local.py +6 -2
  164. mlrun/launcher/remote.py +1 -1
  165. mlrun/lists.py +8 -4
  166. mlrun/model.py +132 -46
  167. mlrun/model_monitoring/__init__.py +3 -5
  168. mlrun/model_monitoring/api.py +113 -98
  169. mlrun/model_monitoring/applications/__init__.py +0 -5
  170. mlrun/model_monitoring/applications/_application_steps.py +81 -50
  171. mlrun/model_monitoring/applications/base.py +467 -14
  172. mlrun/model_monitoring/applications/context.py +212 -134
  173. mlrun/model_monitoring/{db/stores/base → applications/evidently}/__init__.py +6 -2
  174. mlrun/model_monitoring/applications/evidently/base.py +146 -0
  175. mlrun/model_monitoring/applications/histogram_data_drift.py +89 -56
  176. mlrun/model_monitoring/applications/results.py +67 -15
  177. mlrun/model_monitoring/controller.py +701 -315
  178. mlrun/model_monitoring/db/__init__.py +0 -2
  179. mlrun/model_monitoring/db/_schedules.py +242 -0
  180. mlrun/model_monitoring/db/_stats.py +189 -0
  181. mlrun/model_monitoring/db/tsdb/__init__.py +33 -22
  182. mlrun/model_monitoring/db/tsdb/base.py +243 -49
  183. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +76 -36
  184. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +33 -0
  185. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connection.py +213 -0
  186. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +534 -88
  187. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +1 -0
  188. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +436 -106
  189. mlrun/model_monitoring/helpers.py +356 -114
  190. mlrun/model_monitoring/stream_processing.py +190 -345
  191. mlrun/model_monitoring/tracking_policy.py +11 -4
  192. mlrun/model_monitoring/writer.py +49 -90
  193. mlrun/package/__init__.py +3 -6
  194. mlrun/package/context_handler.py +2 -2
  195. mlrun/package/packager.py +12 -9
  196. mlrun/package/packagers/__init__.py +0 -2
  197. mlrun/package/packagers/default_packager.py +14 -11
  198. mlrun/package/packagers/numpy_packagers.py +16 -7
  199. mlrun/package/packagers/pandas_packagers.py +18 -18
  200. mlrun/package/packagers/python_standard_library_packagers.py +25 -11
  201. mlrun/package/packagers_manager.py +35 -32
  202. mlrun/package/utils/__init__.py +0 -3
  203. mlrun/package/utils/_pickler.py +6 -6
  204. mlrun/platforms/__init__.py +47 -16
  205. mlrun/platforms/iguazio.py +4 -1
  206. mlrun/projects/operations.py +30 -30
  207. mlrun/projects/pipelines.py +116 -47
  208. mlrun/projects/project.py +1292 -329
  209. mlrun/render.py +5 -9
  210. mlrun/run.py +57 -14
  211. mlrun/runtimes/__init__.py +1 -3
  212. mlrun/runtimes/base.py +30 -22
  213. mlrun/runtimes/daskjob.py +9 -9
  214. mlrun/runtimes/databricks_job/databricks_runtime.py +6 -5
  215. mlrun/runtimes/function_reference.py +5 -2
  216. mlrun/runtimes/generators.py +3 -2
  217. mlrun/runtimes/kubejob.py +6 -7
  218. mlrun/runtimes/mounts.py +574 -0
  219. mlrun/runtimes/mpijob/__init__.py +0 -2
  220. mlrun/runtimes/mpijob/abstract.py +7 -6
  221. mlrun/runtimes/nuclio/api_gateway.py +7 -7
  222. mlrun/runtimes/nuclio/application/application.py +11 -13
  223. mlrun/runtimes/nuclio/application/reverse_proxy.go +66 -64
  224. mlrun/runtimes/nuclio/function.py +127 -70
  225. mlrun/runtimes/nuclio/serving.py +105 -37
  226. mlrun/runtimes/pod.py +159 -54
  227. mlrun/runtimes/remotesparkjob.py +3 -2
  228. mlrun/runtimes/sparkjob/__init__.py +0 -2
  229. mlrun/runtimes/sparkjob/spark3job.py +22 -12
  230. mlrun/runtimes/utils.py +7 -6
  231. mlrun/secrets.py +2 -2
  232. mlrun/serving/__init__.py +8 -0
  233. mlrun/serving/merger.py +7 -5
  234. mlrun/serving/remote.py +35 -22
  235. mlrun/serving/routers.py +186 -240
  236. mlrun/serving/server.py +41 -10
  237. mlrun/serving/states.py +432 -118
  238. mlrun/serving/utils.py +13 -2
  239. mlrun/serving/v1_serving.py +3 -2
  240. mlrun/serving/v2_serving.py +161 -203
  241. mlrun/track/__init__.py +1 -1
  242. mlrun/track/tracker.py +2 -2
  243. mlrun/track/trackers/mlflow_tracker.py +6 -5
  244. mlrun/utils/async_http.py +35 -22
  245. mlrun/utils/clones.py +7 -4
  246. mlrun/utils/helpers.py +511 -58
  247. mlrun/utils/logger.py +119 -13
  248. mlrun/utils/notifications/notification/__init__.py +22 -19
  249. mlrun/utils/notifications/notification/base.py +39 -15
  250. mlrun/utils/notifications/notification/console.py +6 -6
  251. mlrun/utils/notifications/notification/git.py +11 -11
  252. mlrun/utils/notifications/notification/ipython.py +10 -9
  253. mlrun/utils/notifications/notification/mail.py +176 -0
  254. mlrun/utils/notifications/notification/slack.py +16 -8
  255. mlrun/utils/notifications/notification/webhook.py +24 -8
  256. mlrun/utils/notifications/notification_pusher.py +191 -200
  257. mlrun/utils/regex.py +12 -2
  258. mlrun/utils/version/version.json +2 -2
  259. {mlrun-1.7.2rc4.dist-info → mlrun-1.8.0.dist-info}/METADATA +69 -54
  260. mlrun-1.8.0.dist-info/RECORD +351 -0
  261. {mlrun-1.7.2rc4.dist-info → mlrun-1.8.0.dist-info}/WHEEL +1 -1
  262. mlrun/model_monitoring/applications/evidently_base.py +0 -137
  263. mlrun/model_monitoring/db/stores/__init__.py +0 -136
  264. mlrun/model_monitoring/db/stores/base/store.py +0 -213
  265. mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +0 -71
  266. mlrun/model_monitoring/db/stores/sqldb/models/base.py +0 -190
  267. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +0 -103
  268. mlrun/model_monitoring/db/stores/sqldb/models/sqlite.py +0 -40
  269. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +0 -659
  270. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +0 -726
  271. mlrun/model_monitoring/model_endpoint.py +0 -118
  272. mlrun-1.7.2rc4.dist-info/RECORD +0 -351
  273. {mlrun-1.7.2rc4.dist-info → mlrun-1.8.0.dist-info}/entry_points.txt +0 -0
  274. {mlrun-1.7.2rc4.dist-info → mlrun-1.8.0.dist-info/licenses}/LICENSE +0 -0
  275. {mlrun-1.7.2rc4.dist-info → mlrun-1.8.0.dist-info}/top_level.txt +0 -0
@@ -23,33 +23,43 @@ import pandas as pd
 import mlrun.artifacts
 import mlrun.common.helpers
 import mlrun.common.schemas.model_monitoring.constants as mm_constants
+import mlrun.datastore.base
 import mlrun.feature_store
 import mlrun.model_monitoring.applications as mm_app
 import mlrun.serving
+from mlrun.common.schemas import ModelEndpoint
+from mlrun.common.schemas.model_monitoring import (
+    FunctionURI,
+)
 from mlrun.data_types.infer import InferOptions, get_df_stats
 from mlrun.utils import datetime_now, logger
 
 from .helpers import update_model_endpoint_last_request
-from .model_endpoint import ModelEndpoint
 
 # A union of all supported dataset types:
 DatasetType = typing.Union[
-    mlrun.DataItem, list, dict, pd.DataFrame, pd.Series, np.ndarray, typing.Any
+    mlrun.datastore.base.DataItem,
+    list,
+    dict,
+    pd.DataFrame,
+    pd.Series,
+    np.ndarray,
+    typing.Any,
 ]
 
 
 def get_or_create_model_endpoint(
     project: str,
+    model_endpoint_name: str,
     model_path: str = "",
-    model_endpoint_name: str = "",
     endpoint_id: str = "",
     function_name: str = "",
-    context: mlrun.MLClientCtx = None,
-    sample_set_statistics: dict[str, typing.Any] = None,
-    drift_threshold: typing.Optional[float] = None,
-    possible_drift_threshold: typing.Optional[float] = None,
-    monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.disabled,
+    function_tag: str = "latest",
+    context: typing.Optional["mlrun.MLClientCtx"] = None,
+    sample_set_statistics: typing.Optional[dict[str, typing.Any]] = None,
+    monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.enabled,
     db_session=None,
+    feature_analysis: bool = False,
 ) -> ModelEndpoint:
     """
     Get a single model endpoint object. If not exist, generate a new model endpoint with the provided parameters. Note
@@ -57,40 +67,41 @@ def get_or_create_model_endpoint(
     features, set `monitoring_mode=enabled`.
 
     :param project: Project name.
-    :param model_path: The model store path (applicable only to new endpoint_id).
     :param model_endpoint_name: If a new model endpoint is created, the model endpoint name will be presented
         under this endpoint (applicable only to new endpoint_id).
+    :param model_path: The model store path (applicable only to new endpoint_id).
     :param endpoint_id: Model endpoint unique ID. If not exist in DB, will generate a new record based
         on the provided `endpoint_id`.
-    :param function_name: If a new model endpoint is created, use this function name for generating the
-        function URI (applicable only to new endpoint_id).
+    :param function_name: If a new model endpoint is created, use this function name.
+    :param function_tag: If a new model endpoint is created, use this function tag.
     :param context: MLRun context. If `function_name` not provided, use the context to generate the
         full function hash.
     :param sample_set_statistics: Dictionary of sample set statistics that will be used as a reference data for
        the new model endpoint (applicable only to new endpoint_id).
-    :param drift_threshold: (deprecated) The threshold of which to mark drifts (applicable only to new
-        endpoint_id).
-    :param possible_drift_threshold: (deprecated) The threshold of which to mark possible drifts (applicable only to new
-        endpoint_id).
     :param monitoring_mode: If enabled, apply model monitoring features on the provided endpoint id
        (applicable only to new endpoint_id).
     :param db_session: A runtime session that manages the current dialog with the database.
+    :param feature_analysis: If True, the model endpoint will be retrieved with the feature analysis mode.
 
     :return: A ModelEndpoint object
     """
 
-    if not endpoint_id:
-        # Generate a new model endpoint id based on the project name and model name
-        endpoint_id = hashlib.sha1(
-            f"{project}_{model_endpoint_name}".encode()
-        ).hexdigest()
-
     if not db_session:
         # Generate a runtime database
         db_session = mlrun.get_run_db()
+    model_endpoint = None
+    if not function_name and context:
+        function_name = FunctionURI.from_string(
+            context.to_dict()["spec"]["function"]
+        ).function
     try:
         model_endpoint = db_session.get_model_endpoint(
-            project=project, endpoint_id=endpoint_id
+            project=project,
+            name=model_endpoint_name,
+            endpoint_id=endpoint_id,
+            function_name=function_name,
+            function_tag=function_tag or "latest",
+            feature_analysis=feature_analysis,
         )
         # If other fields provided, validate that they are correspond to the existing model endpoint data
         _model_endpoint_validations(
@@ -99,17 +110,17 @@ def get_or_create_model_endpoint(
             sample_set_statistics=sample_set_statistics,
         )
 
-    except mlrun.errors.MLRunNotFoundError:
+    except (mlrun.errors.MLRunNotFoundError, mlrun.errors.MLRunInvalidArgumentError):
         # Create a new model endpoint with the provided details
+        pass
+    if not model_endpoint:
         model_endpoint = _generate_model_endpoint(
             project=project,
             db_session=db_session,
-            endpoint_id=endpoint_id,
             model_path=model_path,
             model_endpoint_name=model_endpoint_name,
             function_name=function_name,
-            context=context,
-            sample_set_statistics=sample_set_statistics,
+            function_tag=function_tag,
            monitoring_mode=monitoring_mode,
         )
     return model_endpoint
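The three hunks above (apparently from mlrun/model_monitoring/api.py, entry 168 in the files-changed list) make `model_endpoint_name` a required argument, drop the deprecated drift-threshold parameters, stop deriving the endpoint ID from a SHA-1 of the project and model name, and instead look the endpoint up by name, function name, and function tag. A minimal sketch of a 1.8.0-style call, assuming the function stays importable from `mlrun.model_monitoring.api`; all values are placeholders:

```python
# Hedged sketch: argument names follow the new signature shown in the diff;
# the project, model, and function names are hypothetical placeholders.
from mlrun.model_monitoring.api import get_or_create_model_endpoint

model_endpoint = get_or_create_model_endpoint(
    project="my-project",
    model_endpoint_name="churn-model-endpoint",  # now required (no default)
    model_path="store://models/my-project/churn-model:latest",
    function_name="batch-infer",
    function_tag="latest",    # new in 1.8.0
    feature_analysis=False,   # new in 1.8.0
)
print(model_endpoint.metadata.name)
```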
@@ -121,7 +132,7 @@ def record_results(
     model_endpoint_name: str,
     endpoint_id: str = "",
     function_name: str = "",
-    context: typing.Optional[mlrun.MLClientCtx] = None,
+    context: typing.Optional["mlrun.MLClientCtx"] = None,
     infer_results_df: typing.Optional[pd.DataFrame] = None,
     sample_set_statistics: typing.Optional[dict[str, typing.Any]] = None,
     monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.enabled,
@@ -149,7 +160,8 @@ def record_results(
     :param context: MLRun context. Note that the context is required generating the model endpoint.
     :param infer_results_df: DataFrame that will be stored under the model endpoint parquet target. Will be
        used for doing the drift analysis. Please make sure that the dataframe includes
-        both feature names and label columns.
+        both feature names and label columns. If you are recording results for existing
+        model endpoint, the endpoint should be a batch endpoint.
     :param sample_set_statistics: Dictionary of sample set statistics that will be used as a reference data for
        the current model endpoint.
     :param monitoring_mode: If enabled, apply model monitoring features on the provided endpoint id. Enabled
@@ -169,7 +181,7 @@
     if drift_threshold is not None or possible_drift_threshold is not None:
         warnings.warn(
             "Custom drift threshold arguments are deprecated since version "
-            "1.7.0 and have no effect. They will be removed in version 1.9.0.\n"
+            "1.7.0 and have no effect. They will be removed in version 1.10.0.\n"
            "To enable the default histogram data drift application, run:\n"
            "`project.enable_model_monitoring()`.",
            FutureWarning,
@@ -177,7 +189,7 @@
     if trigger_monitoring_job is not False:
         warnings.warn(
             "`trigger_monitoring_job` argument is deprecated since version "
-            "1.7.0 and has no effect. It will be removed in version 1.9.0.\n"
+            "1.7.0 and has no effect. It will be removed in version 1.10.0.\n"
            "To enable the default histogram data drift application, run:\n"
            "`project.enable_model_monitoring()`.",
            FutureWarning,
@@ -185,13 +197,13 @@
     if artifacts_tag != "":
         warnings.warn(
             "`artifacts_tag` argument is deprecated since version "
-            "1.7.0 and has no effect. It will be removed in version 1.9.0.",
+            "1.7.0 and has no effect. It will be removed in version 1.10.0.",
            FutureWarning,
        )
     if default_batch_image != "mlrun/mlrun":
         warnings.warn(
             "`default_batch_image` argument is deprecated since version "
-            "1.7.0 and has no effect. It will be removed in version 1.9.0.",
+            "1.7.0 and has no effect. It will be removed in version 1.10.0.",
            FutureWarning,
        )
 
@@ -208,25 +220,34 @@
         monitoring_mode=monitoring_mode,
         db_session=db,
     )
-    logger.debug("Model endpoint", endpoint=model_endpoint.to_dict())
+    logger.debug("Model endpoint", endpoint=model_endpoint)
 
-    timestamp = datetime_now()
     if infer_results_df is not None:
-        # Write the monitoring parquet to the relevant model endpoint context
-        write_monitoring_df(
-            feature_set_uri=model_endpoint.status.monitoring_feature_set_uri,
-            infer_datetime=timestamp,
-            endpoint_id=model_endpoint.metadata.uid,
-            infer_results_df=infer_results_df,
-        )
+        if (
+            model_endpoint.metadata.endpoint_type
+            != mlrun.common.schemas.model_monitoring.EndpointType.BATCH_EP
+        ):
+            logger.warning(
+                "Inference results can be recorded only for batch endpoints. "
+                "Therefore the current results won't be monitored."
+            )
+        else:
+            timestamp = datetime_now()
+            # Write the monitoring parquet to the relevant model endpoint context
+            write_monitoring_df(
+                feature_set_uri=model_endpoint.spec.monitoring_feature_set_uri,
+                infer_datetime=timestamp,
+                endpoint_id=model_endpoint.metadata.uid,
+                infer_results_df=infer_results_df,
+            )
 
-    # Update the last request time
-    update_model_endpoint_last_request(
-        project=project,
-        model_endpoint=model_endpoint,
-        current_request=timestamp,
-        db=db,
-    )
+            # Update the last request time
+            update_model_endpoint_last_request(
+                project=project,
+                model_endpoint=model_endpoint,
+                current_request=timestamp,
+                db=db,
+            )
 
     return model_endpoint
 
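Per the hunk above, `record_results` now writes the inference DataFrame (and updates the endpoint's last-request time) only when the endpoint is a batch endpoint; otherwise it logs a warning and skips monitoring. A hedged usage sketch, using keyword arguments whose names appear in the hunks above; the `model_path` keyword is assumed to keep its pre-1.8.0 name, and all values are placeholders:

```python
# Illustrative only; the endpoint created behind the scenes is a batch endpoint,
# so infer_results_df is actually recorded rather than skipped with a warning.
import pandas as pd
from mlrun.model_monitoring.api import record_results

results_df = pd.DataFrame(
    {"feature_a": [0.1, 0.7], "feature_b": [1.2, 3.4], "label": [0, 1]}
)

model_endpoint = record_results(
    project="my-project",
    model_path="store://models/my-project/churn-model:latest",  # assumed keyword
    model_endpoint_name="churn-model-endpoint",
    infer_results_df=results_df,
)
```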
@@ -234,7 +255,7 @@ def record_results(
 def _model_endpoint_validations(
     model_endpoint: ModelEndpoint,
     model_path: str = "",
-    sample_set_statistics: dict[str, typing.Any] = None,
+    sample_set_statistics: typing.Optional[dict[str, typing.Any]] = None,
 ) -> None:
     """
     Validate that provided model endpoint configurations match the stored fields of the provided `ModelEndpoint`
@@ -278,7 +299,7 @@ def _model_endpoint_validations(
     # Feature stats
     if (
         sample_set_statistics
-        and sample_set_statistics != model_endpoint.status.feature_stats
+        and sample_set_statistics != model_endpoint.spec.feature_stats
     ):
         logger.warning(
             "Provided sample set statistics is different from the registered statistics. "
@@ -290,7 +311,7 @@ def write_monitoring_df(
     endpoint_id: str,
     infer_results_df: pd.DataFrame,
     infer_datetime: datetime,
-    monitoring_feature_set: typing.Optional[mlrun.feature_store.FeatureSet] = None,
+    monitoring_feature_set: typing.Optional["mlrun.feature_store.FeatureSet"] = None,
     feature_set_uri: str = "",
 ) -> None:
     """Write infer results dataframe to the monitoring parquet target of the current model endpoint. The dataframe will
@@ -330,13 +351,11 @@
 def _generate_model_endpoint(
     project: str,
     db_session,
-    endpoint_id: str,
     model_path: str,
     model_endpoint_name: str,
     function_name: str,
-    context: mlrun.MLClientCtx,
-    sample_set_statistics: dict[str, typing.Any],
-    monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.disabled,
+    function_tag: str,
+    monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.enabled,
 ) -> ModelEndpoint:
     """
     Write a new model endpoint record.
@@ -344,50 +363,40 @@ def _generate_model_endpoint(
     :param project: Project name.
 
     :param db_session: A session that manages the current dialog with the database.
-    :param endpoint_id: Model endpoint unique ID.
     :param model_path: The model Store path.
     :param model_endpoint_name: Model endpoint name will be presented under the new model endpoint.
-    :param function_name: If a new model endpoint is created, use this function name for generating the
-        function URI.
-    :param context: MLRun context. If function_name not provided, use the context to generate the
-        full function hash.
-    :param sample_set_statistics: Dictionary of sample set statistics that will be used as a reference data for
-        the current model endpoint. Will be stored under
-        `model_endpoint.status.feature_stats`.
+    :param function_name: If a new model endpoint is created, use this function name.
+    :param function_tag: If a new model endpoint is created, use this function tag.
+    :param monitoring_mode: Monitoring mode of the new model endpoint.
 
-    :return `mlrun.model_monitoring.model_endpoint.ModelEndpoint` object.
+    :return `mlrun.common.schemas.ModelEndpoint` object.
     """
-    model_endpoint = ModelEndpoint()
-    model_endpoint.metadata.project = project
-    model_endpoint.metadata.uid = endpoint_id
-    if function_name:
-        model_endpoint.spec.function_uri = project + "/" + function_name
-    elif not context:
-        raise mlrun.errors.MLRunInvalidArgumentError(
-            "Please provide either a function name or a valid MLRun context"
-        )
-    else:
-        model_endpoint.spec.function_uri = context.to_dict()["spec"]["function"]
-    model_endpoint.spec.model_uri = model_path
-    model_endpoint.spec.model = model_endpoint_name
-    model_endpoint.spec.model_class = "drift-analysis"
-    model_endpoint.spec.monitoring_mode = monitoring_mode
-    model_endpoint.status.first_request = model_endpoint.status.last_request = (
-        datetime_now().isoformat()
-    )
-    if sample_set_statistics:
-        model_endpoint.status.feature_stats = sample_set_statistics
-
-    db_session.create_model_endpoint(
-        project=project, endpoint_id=endpoint_id, model_endpoint=model_endpoint
+    current_time = datetime_now()
+    model_endpoint = mlrun.common.schemas.ModelEndpoint(
+        metadata=mlrun.common.schemas.ModelEndpointMetadata(
+            project=project,
+            name=model_endpoint_name,
+            endpoint_type=mlrun.common.schemas.model_monitoring.EndpointType.BATCH_EP,
+        ),
+        spec=mlrun.common.schemas.ModelEndpointSpec(
+            function_name=function_name or "function",
+            function_tag=function_tag or "latest",
+            model_path=model_path,
+            model_class="drift-analysis",
+        ),
+        status=mlrun.common.schemas.ModelEndpointStatus(
+            monitoring_mode=monitoring_mode,
+            first_request=current_time,
+            last_request=current_time,
+        ),
     )
 
-    return db_session.get_model_endpoint(project=project, endpoint_id=endpoint_id)
+    return db_session.create_model_endpoint(model_endpoint=model_endpoint)
 
 
 def get_sample_set_statistics(
     sample_set: DatasetType = None,
-    model_artifact_feature_stats: dict = None,
+    model_artifact_feature_stats: typing.Optional[dict] = None,
     sample_set_columns: typing.Optional[list] = None,
     sample_set_drop_columns: typing.Optional[list] = None,
     sample_set_label_columns: typing.Optional[list] = None,
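The `_generate_model_endpoint` rewrite above replaces the old pattern of mutating a legacy `ModelEndpoint` object and re-reading it with `get_model_endpoint` by building the `mlrun.common.schemas.ModelEndpoint` schema object from metadata/spec/status parts and handing it directly to `db_session.create_model_endpoint`. A minimal construction sketch that mirrors the added lines; class and field names are taken from the diff, the values are placeholders:

```python
# Hedged sketch of the new construction pattern; it only uses names that appear
# in the added lines above, with placeholder values.
import mlrun.common.schemas as schemas
import mlrun.common.schemas.model_monitoring.constants as mm_constants
from mlrun.common.schemas.model_monitoring import EndpointType
from mlrun.utils import datetime_now

now = datetime_now()
model_endpoint = schemas.ModelEndpoint(
    metadata=schemas.ModelEndpointMetadata(
        project="my-project",
        name="churn-model-endpoint",
        endpoint_type=EndpointType.BATCH_EP,
    ),
    spec=schemas.ModelEndpointSpec(
        function_name="batch-infer",
        function_tag="latest",
        model_path="store://models/my-project/churn-model:latest",
        model_class="drift-analysis",
    ),
    status=schemas.ModelEndpointStatus(
        monitoring_mode=mm_constants.ModelMonitoringMode.enabled,
        first_request=now,
        last_request=now,
    ),
)
```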
@@ -445,9 +454,9 @@ def get_sample_set_statistics(
 
 def read_dataset_as_dataframe(
     dataset: DatasetType,
-    feature_columns: typing.Union[str, list[str]] = None,
-    label_columns: typing.Union[str, list[str]] = None,
-    drop_columns: typing.Union[str, list[str], int, list[int]] = None,
+    feature_columns: typing.Optional[typing.Union[str, list[str]]] = None,
+    label_columns: typing.Optional[typing.Union[str, list[str]]] = None,
+    drop_columns: typing.Optional[typing.Union[str, list[str], int, list[int]]] = None,
 ) -> tuple[pd.DataFrame, list[str]]:
     """
     Parse the given dataset into a DataFrame and drop the columns accordingly. In addition, the label columns will be
@@ -531,7 +540,7 @@
 
 
 def log_result(
-    context: mlrun.MLClientCtx,
+    context: "mlrun.MLClientCtx",
     result_set_name: str,
     result_set: pd.DataFrame,
     artifacts_tag: str,
@@ -559,9 +568,7 @@ def _create_model_monitoring_function_base(
     project: str,
     func: typing.Union[str, None] = None,
     application_class: typing.Union[
-        str,
-        mm_app.ModelMonitoringApplicationBase,
-        None,
+        str, "mm_app.ModelMonitoringApplicationBase", None
     ] = None,
     name: typing.Optional[str] = None,
     image: typing.Optional[str] = None,
@@ -608,8 +615,8 @@
     app_step.__class__ = mlrun.serving.MonitoringApplicationStep
 
     app_step.error_handler(
-        name="ApplicationErrorHandler",
         class_name="mlrun.model_monitoring.applications._application_steps._ApplicationErrorHandler",
+        name="ApplicationErrorHandler",
         full_event=True,
         project=project,
     )
@@ -618,6 +625,14 @@
         class_name="mlrun.model_monitoring.applications._application_steps._PushToMonitoringWriter",
         name="PushToMonitoringWriter",
         project=project,
-        writer_application_name=mm_constants.MonitoringFunctionNames.WRITER,
     )
+
+    def block_to_mock_server(*args, **kwargs) -> typing.NoReturn:
+        raise NotImplementedError(
+            "Model monitoring serving functions do not support `.to_mock_server`. "
+            "You may call your model monitoring application object logic via the `.evaluate` method."
+        )
+
+    func_obj.to_mock_server = block_to_mock_server  # Until ML-7643 is implemented
+
     return func_obj
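The added lines above disable `.to_mock_server` on the function object returned by `_create_model_monitoring_function_base` by rebinding the attribute on that instance to a stub that raises `NotImplementedError`, pointing users to the application's `.evaluate` method instead. A self-contained sketch of the same instance-level override pattern, with hypothetical class names:

```python
# Generic illustration of the pattern; ServingFunction is a hypothetical
# stand-in for the real serving-function object, not an mlrun class.
import typing


class ServingFunction:
    def to_mock_server(self) -> str:
        return "mock server"


def block_to_mock_server(*args, **kwargs) -> typing.NoReturn:
    raise NotImplementedError(
        "This function does not support `.to_mock_server`; use `.evaluate` instead."
    )


func_obj = ServingFunction()
func_obj.to_mock_server = block_to_mock_server  # overrides only this instance

try:
    func_obj.to_mock_server()
except NotImplementedError as exc:
    print(exc)
```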
@@ -15,9 +15,4 @@
 
 from .base import ModelMonitoringApplicationBase
 from .context import MonitoringApplicationContext
-from .evidently_base import (
-    _HAS_EVIDENTLY,
-    SUPPORTED_EVIDENTLY_VERSION,
-    EvidentlyModelMonitoringApplicationBase,
-)
 from .results import ModelMonitoringApplicationMetric, ModelMonitoringApplicationResult
@@ -12,56 +12,49 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import collections
 import json
 import traceback
+from collections import OrderedDict
+from datetime import datetime
 from typing import Any, Optional, Union
 
+import mlrun.common.schemas
 import mlrun.common.schemas.alert as alert_objects
-import mlrun.common.schemas.model_monitoring.constants as mm_constant
-import mlrun.datastore
-import mlrun.model_monitoring
-from mlrun.model_monitoring.helpers import get_stream_path
+import mlrun.common.schemas.model_monitoring.constants as mm_constants
+import mlrun.model_monitoring.helpers
 from mlrun.serving import GraphContext
 from mlrun.serving.utils import StepToDict
 from mlrun.utils import logger
 
 from .context import MonitoringApplicationContext
-from .results import ModelMonitoringApplicationMetric, ModelMonitoringApplicationResult
+from .results import (
+    ModelMonitoringApplicationMetric,
+    ModelMonitoringApplicationResult,
+    _ModelMonitoringApplicationStats,
+)
 
 
 class _PushToMonitoringWriter(StepToDict):
     kind = "monitoring_application_stream_pusher"
 
-    def __init__(
-        self,
-        project: str,
-        writer_application_name: str,
-        stream_uri: Optional[str] = None,
-        name: Optional[str] = None,
-    ):
+    def __init__(self, project: str) -> None:
         """
         Class for pushing application results to the monitoring writer stream.
 
-        :param project: Project name.
-        :param writer_application_name: Writer application name.
-        :param stream_uri: Stream URI for pushing results.
-        :param name: Name of the PushToMonitoringWriter
-            instance default to PushToMonitoringWriter.
+        :param project: Project name.
         """
         self.project = project
-        self.application_name_to_push = writer_application_name
-        self.stream_uri = stream_uri or get_stream_path(
-            project=self.project, function_name=self.application_name_to_push
-        )
         self.output_stream = None
-        self.name = name or "PushToMonitoringWriter"
 
     def do(
         self,
         event: tuple[
             list[
                 Union[
-                    ModelMonitoringApplicationResult, ModelMonitoringApplicationMetric
+                    ModelMonitoringApplicationResult,
+                    ModelMonitoringApplicationMetric,
+                    _ModelMonitoringApplicationStats,
                 ]
             ],
             MonitoringApplicationContext,
@@ -75,50 +68,50 @@ class _PushToMonitoringWriter(StepToDict):
         self._lazy_init()
         application_results, application_context = event
         writer_event = {
-            mm_constant.WriterEvent.APPLICATION_NAME: application_context.application_name,
-            mm_constant.WriterEvent.ENDPOINT_ID: application_context.endpoint_id,
-            mm_constant.WriterEvent.START_INFER_TIME: application_context.start_infer_time.isoformat(
+            mm_constants.WriterEvent.ENDPOINT_NAME: application_context.endpoint_name,
+            mm_constants.WriterEvent.APPLICATION_NAME: application_context.application_name,
+            mm_constants.WriterEvent.ENDPOINT_ID: application_context.endpoint_id,
+            mm_constants.WriterEvent.START_INFER_TIME: application_context.start_infer_time.isoformat(
                 sep=" ", timespec="microseconds"
             ),
-            mm_constant.WriterEvent.END_INFER_TIME: application_context.end_infer_time.isoformat(
+            mm_constants.WriterEvent.END_INFER_TIME: application_context.end_infer_time.isoformat(
                 sep=" ", timespec="microseconds"
            ),
        }
         for result in application_results:
             data = result.to_dict()
             if isinstance(result, ModelMonitoringApplicationResult):
-                writer_event[mm_constant.WriterEvent.EVENT_KIND] = (
-                    mm_constant.WriterEventKind.RESULT
+                writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
+                    mm_constants.WriterEventKind.RESULT
                )
-                data[mm_constant.ResultData.CURRENT_STATS] = json.dumps(
-                    application_context.sample_df_stats
+            elif isinstance(result, _ModelMonitoringApplicationStats):
+                writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
+                    mm_constants.WriterEventKind.STATS
                )
-                writer_event[mm_constant.WriterEvent.DATA] = json.dumps(data)
             else:
-                writer_event[mm_constant.WriterEvent.EVENT_KIND] = (
-                    mm_constant.WriterEventKind.METRIC
+                writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
+                    mm_constants.WriterEventKind.METRIC
                )
-                writer_event[mm_constant.WriterEvent.DATA] = json.dumps(data)
-
-            writer_event[mm_constant.WriterEvent.EVENT_KIND] = (
-                mm_constant.WriterEventKind.RESULT
-                if isinstance(result, ModelMonitoringApplicationResult)
-                else mm_constant.WriterEventKind.METRIC
+            writer_event[mm_constants.WriterEvent.DATA] = json.dumps(data)
+            logger.debug(
+                "Pushing data to output stream", writer_event=str(writer_event)
            )
-            logger.info(
-                f"Pushing data = {writer_event} \n to stream = {self.stream_uri}"
+            self.output_stream.push(
+                [writer_event], partition_key=application_context.endpoint_id
            )
-            self.output_stream.push([writer_event])
-            logger.info(f"Pushed data to {self.stream_uri} successfully")
+            logger.debug("Pushed data to output stream successfully")
 
     def _lazy_init(self):
         if self.output_stream is None:
-            self.output_stream = mlrun.datastore.get_stream_pusher(
-                self.stream_uri,
+            self.output_stream = mlrun.model_monitoring.helpers.get_output_stream(
+                project=self.project,
+                function_name=mm_constants.MonitoringFunctionNames.WRITER,
            )
 
 
 class _PrepareMonitoringEvent(StepToDict):
+    MAX_MODEL_ENDPOINTS: int = 1500
+
     def __init__(self, context: GraphContext, application_name: str) -> None:
         """
         Class for preparing the application event for the application step.
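In the hunks above, `_PushToMonitoringWriter` loses its `writer_application_name`, `stream_uri`, and `name` parameters: the step now only remembers the project, resolves its output stream lazily on the first event via `mlrun.model_monitoring.helpers.get_output_stream`, and pushes each writer event with `partition_key=application_context.endpoint_id`. A small self-contained sketch of that lazy-initialization pattern; the stream class and helper below are stand-ins, not mlrun APIs:

```python
# Hedged, self-contained illustration of lazy stream initialization; FakeStream
# and connect_output_stream are hypothetical stand-ins for the real helpers.
from typing import Optional


class FakeStream:
    def push(self, records: list[dict], partition_key: Optional[str] = None) -> None:
        print(f"pushed {len(records)} record(s), partition_key={partition_key}")


def connect_output_stream(project: str) -> FakeStream:
    return FakeStream()


class LazyStreamPusher:
    def __init__(self, project: str) -> None:
        self.project = project
        self.output_stream: Optional[FakeStream] = None  # created lazily

    def _lazy_init(self) -> None:
        if self.output_stream is None:
            self.output_stream = connect_output_stream(self.project)

    def do(self, event: dict) -> dict:
        self._lazy_init()
        # keep all records of one endpoint on the same partition
        self.output_stream.push([event], partition_key=event.get("endpoint_id"))
        return event


LazyStreamPusher("my-project").do({"endpoint_id": "abc123", "result": 0.1})
```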
@@ -126,8 +119,12 @@ class _PrepareMonitoringEvent(StepToDict):
         :param application_name: Application name.
         """
         self.graph_context = context
+        _ = self.graph_context.project_obj  # Ensure project exists
         self.application_name = application_name
-        self.model_endpoints: dict[str, mlrun.model_monitoring.ModelEndpoint] = {}
+        self.model_endpoints: OrderedDict[str, mlrun.common.schemas.ModelEndpoint] = (
+            collections.OrderedDict()
+        )
+        self.feature_sets: dict[str, mlrun.common.schemas.FeatureSet] = {}
 
     def do(self, event: dict[str, Any]) -> MonitoringApplicationContext:
         """
@@ -136,16 +133,48 @@ class _PrepareMonitoringEvent(StepToDict):
         :param event: Application event.
         :return: Application context.
         """
-        application_context = MonitoringApplicationContext(
-            graph_context=self.graph_context,
+        endpoint_id = event.get(mm_constants.ApplicationEvent.ENDPOINT_ID)
+        endpoint_updated = datetime.fromisoformat(
+            event.get(mm_constants.ApplicationEvent.ENDPOINT_UPDATED)
+        )
+        if (
+            endpoint_id in self.model_endpoints
+            and endpoint_updated != self.model_endpoints[endpoint_id].metadata.updated
+        ):
+            logger.debug(
+                "Updated endpoint removing endpoint from cash",
+                new_updated=endpoint_updated.isoformat(),
+                old_updated=self.model_endpoints[
+                    endpoint_id
+                ].metadata.updated.isoformat(),
+            )
+            self.model_endpoints.pop(endpoint_id)
+
+        application_context = MonitoringApplicationContext._from_graph_ctx(
             application_name=self.application_name,
             event=event,
             model_endpoint_dict=self.model_endpoints,
+            graph_context=self.graph_context,
+            feature_sets_dict=self.feature_sets,
         )
 
         self.model_endpoints.setdefault(
             application_context.endpoint_id, application_context.model_endpoint
         )
+        self.feature_sets.setdefault(
+            application_context.endpoint_id, application_context.feature_set
+        )
+        # every used endpoint goes to first location allowing to pop last used:
+        self.model_endpoints.move_to_end(application_context.endpoint_id, last=False)
+        if len(self.model_endpoints) > self.MAX_MODEL_ENDPOINTS:
+            removed_endpoint_id, _ = self.model_endpoints.popitem(
+                last=True
+            )  # Removing the LRU endpoint
+            self.feature_sets.pop(removed_endpoint_id, None)
+            logger.debug(
+                "Exceeded maximum number of model endpoints removing the LRU from cash",
+                endpoint_id=removed_endpoint_id,
+            )
 
         return application_context
 
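The `do` method above keeps the per-endpoint model-endpoint and feature-set objects in a bounded, LRU-style cache: every accessed endpoint is moved to the front of a `collections.OrderedDict`, and once `MAX_MODEL_ENDPOINTS` is exceeded the least recently used entry is evicted. A self-contained sketch of that caching idiom, with a cap of 3 instead of the 1500 used above:

```python
# Minimal LRU-style cache sketch using collections.OrderedDict, mirroring the
# move_to_end/popitem calls in the diff above; the cap and keys are illustrative.
import collections

MAX_MODEL_ENDPOINTS = 3  # the step above uses 1500

cache: collections.OrderedDict[str, dict] = collections.OrderedDict()


def remember(endpoint_id: str, endpoint: dict) -> None:
    cache.setdefault(endpoint_id, endpoint)
    # most recently used entries live at the front of the dict
    cache.move_to_end(endpoint_id, last=False)
    if len(cache) > MAX_MODEL_ENDPOINTS:
        evicted_id, _ = cache.popitem(last=True)  # evict the least recently used
        print(f"evicted {evicted_id}")


for i in range(5):
    remember(f"endpoint-{i}", {"uid": i})
print(list(cache))  # -> ['endpoint-4', 'endpoint-3', 'endpoint-2']
```

Combining `setdefault` with `move_to_end(last=False)` means a repeated endpoint is not re-inserted, only promoted to the most-recently-used position.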
@@ -166,7 +195,9 @@ class _ApplicationErrorHandler(StepToDict):
             "Endpoint ID": event.body.endpoint_id,
             "Application Class": event.body.application_name,
             "Error": "".join(
-                traceback.format_exception(None, event.error, event.error.__traceback__)
+                traceback.format_exception(
+                    None, value=event.error, tb=event.error.__traceback__
+                )
             ),
             "Timestamp": event.timestamp,
         }