mlrun 1.10.0rc18__py3-none-any.whl → 1.11.0rc16__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry; it is provided for informational purposes only.
Potentially problematic release.
- mlrun/__init__.py +24 -3
- mlrun/__main__.py +0 -4
- mlrun/artifacts/dataset.py +2 -2
- mlrun/artifacts/document.py +6 -1
- mlrun/artifacts/llm_prompt.py +21 -15
- mlrun/artifacts/model.py +3 -3
- mlrun/artifacts/plots.py +1 -1
- mlrun/{model_monitoring/db/tsdb/tdengine → auth}/__init__.py +2 -3
- mlrun/auth/nuclio.py +89 -0
- mlrun/auth/providers.py +429 -0
- mlrun/auth/utils.py +415 -0
- mlrun/common/constants.py +14 -0
- mlrun/common/model_monitoring/helpers.py +123 -0
- mlrun/common/runtimes/constants.py +28 -0
- mlrun/common/schemas/__init__.py +14 -3
- mlrun/common/schemas/alert.py +2 -2
- mlrun/common/schemas/api_gateway.py +3 -0
- mlrun/common/schemas/auth.py +12 -10
- mlrun/common/schemas/client_spec.py +4 -0
- mlrun/common/schemas/constants.py +25 -0
- mlrun/common/schemas/frontend_spec.py +1 -8
- mlrun/common/schemas/function.py +34 -0
- mlrun/common/schemas/hub.py +33 -20
- mlrun/common/schemas/model_monitoring/__init__.py +2 -1
- mlrun/common/schemas/model_monitoring/constants.py +12 -15
- mlrun/common/schemas/model_monitoring/functions.py +13 -4
- mlrun/common/schemas/model_monitoring/model_endpoints.py +11 -0
- mlrun/common/schemas/pipeline.py +1 -1
- mlrun/common/schemas/secret.py +17 -2
- mlrun/common/secrets.py +95 -1
- mlrun/common/types.py +10 -10
- mlrun/config.py +69 -19
- mlrun/data_types/infer.py +2 -2
- mlrun/datastore/__init__.py +12 -5
- mlrun/datastore/azure_blob.py +162 -47
- mlrun/datastore/base.py +274 -10
- mlrun/datastore/datastore.py +7 -2
- mlrun/datastore/datastore_profile.py +84 -22
- mlrun/datastore/model_provider/huggingface_provider.py +225 -41
- mlrun/datastore/model_provider/mock_model_provider.py +87 -0
- mlrun/datastore/model_provider/model_provider.py +206 -74
- mlrun/datastore/model_provider/openai_provider.py +226 -66
- mlrun/datastore/s3.py +39 -18
- mlrun/datastore/sources.py +1 -1
- mlrun/datastore/store_resources.py +4 -4
- mlrun/datastore/storeytargets.py +17 -12
- mlrun/datastore/targets.py +1 -1
- mlrun/datastore/utils.py +25 -6
- mlrun/datastore/v3io.py +1 -1
- mlrun/db/base.py +63 -32
- mlrun/db/httpdb.py +373 -153
- mlrun/db/nopdb.py +54 -21
- mlrun/errors.py +4 -2
- mlrun/execution.py +66 -25
- mlrun/feature_store/api.py +1 -1
- mlrun/feature_store/common.py +1 -1
- mlrun/feature_store/feature_vector_utils.py +1 -1
- mlrun/feature_store/steps.py +8 -6
- mlrun/frameworks/_common/utils.py +3 -3
- mlrun/frameworks/_dl_common/loggers/logger.py +1 -1
- mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +2 -1
- mlrun/frameworks/_ml_common/loggers/mlrun_logger.py +1 -1
- mlrun/frameworks/_ml_common/utils.py +2 -1
- mlrun/frameworks/auto_mlrun/auto_mlrun.py +4 -3
- mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +2 -1
- mlrun/frameworks/onnx/dataset.py +2 -1
- mlrun/frameworks/onnx/mlrun_interface.py +2 -1
- mlrun/frameworks/pytorch/callbacks/logging_callback.py +5 -4
- mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +2 -1
- mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +2 -1
- mlrun/frameworks/pytorch/utils.py +2 -1
- mlrun/frameworks/sklearn/metric.py +2 -1
- mlrun/frameworks/tf_keras/callbacks/logging_callback.py +5 -4
- mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +2 -1
- mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +2 -1
- mlrun/hub/__init__.py +52 -0
- mlrun/hub/base.py +142 -0
- mlrun/hub/module.py +172 -0
- mlrun/hub/step.py +113 -0
- mlrun/k8s_utils.py +105 -16
- mlrun/launcher/base.py +15 -7
- mlrun/launcher/local.py +4 -1
- mlrun/model.py +14 -4
- mlrun/model_monitoring/__init__.py +0 -1
- mlrun/model_monitoring/api.py +65 -28
- mlrun/model_monitoring/applications/__init__.py +1 -1
- mlrun/model_monitoring/applications/base.py +299 -128
- mlrun/model_monitoring/applications/context.py +2 -4
- mlrun/model_monitoring/controller.py +132 -58
- mlrun/model_monitoring/db/_schedules.py +38 -29
- mlrun/model_monitoring/db/_stats.py +6 -16
- mlrun/model_monitoring/db/tsdb/__init__.py +9 -7
- mlrun/model_monitoring/db/tsdb/base.py +29 -9
- mlrun/model_monitoring/db/tsdb/preaggregate.py +234 -0
- mlrun/model_monitoring/db/tsdb/stream_graph_steps.py +63 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/queries/timescaledb_metrics_queries.py +414 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/queries/timescaledb_predictions_queries.py +376 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/queries/timescaledb_results_queries.py +590 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_connection.py +434 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_connector.py +541 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_operations.py +808 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_schema.py +502 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_stream.py +163 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_stream_graph_steps.py +60 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/utils/timescaledb_dataframe_processor.py +141 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/utils/timescaledb_query_builder.py +585 -0
- mlrun/model_monitoring/db/tsdb/timescaledb/writer_graph_steps.py +73 -0
- mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +20 -9
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +235 -51
- mlrun/model_monitoring/features_drift_table.py +2 -1
- mlrun/model_monitoring/helpers.py +30 -6
- mlrun/model_monitoring/stream_processing.py +34 -28
- mlrun/model_monitoring/writer.py +224 -4
- mlrun/package/__init__.py +2 -1
- mlrun/platforms/__init__.py +0 -43
- mlrun/platforms/iguazio.py +8 -4
- mlrun/projects/operations.py +17 -11
- mlrun/projects/pipelines.py +2 -2
- mlrun/projects/project.py +187 -123
- mlrun/run.py +95 -21
- mlrun/runtimes/__init__.py +2 -186
- mlrun/runtimes/base.py +103 -25
- mlrun/runtimes/constants.py +225 -0
- mlrun/runtimes/daskjob.py +5 -2
- mlrun/runtimes/databricks_job/databricks_runtime.py +2 -1
- mlrun/runtimes/local.py +5 -2
- mlrun/runtimes/mounts.py +20 -2
- mlrun/runtimes/nuclio/__init__.py +12 -7
- mlrun/runtimes/nuclio/api_gateway.py +36 -6
- mlrun/runtimes/nuclio/application/application.py +339 -40
- mlrun/runtimes/nuclio/function.py +222 -72
- mlrun/runtimes/nuclio/serving.py +132 -42
- mlrun/runtimes/pod.py +213 -21
- mlrun/runtimes/utils.py +49 -9
- mlrun/secrets.py +99 -14
- mlrun/serving/__init__.py +2 -0
- mlrun/serving/remote.py +84 -11
- mlrun/serving/routers.py +26 -44
- mlrun/serving/server.py +138 -51
- mlrun/serving/serving_wrapper.py +6 -2
- mlrun/serving/states.py +997 -283
- mlrun/serving/steps.py +62 -0
- mlrun/serving/system_steps.py +149 -95
- mlrun/serving/v2_serving.py +9 -10
- mlrun/track/trackers/mlflow_tracker.py +29 -31
- mlrun/utils/helpers.py +292 -94
- mlrun/utils/http.py +9 -2
- mlrun/utils/notifications/notification/base.py +18 -0
- mlrun/utils/notifications/notification/git.py +3 -5
- mlrun/utils/notifications/notification/mail.py +39 -16
- mlrun/utils/notifications/notification/slack.py +2 -4
- mlrun/utils/notifications/notification/webhook.py +2 -5
- mlrun/utils/notifications/notification_pusher.py +3 -3
- mlrun/utils/version/version.json +2 -2
- mlrun/utils/version/version.py +3 -4
- {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/METADATA +63 -74
- {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/RECORD +161 -143
- mlrun/api/schemas/__init__.py +0 -259
- mlrun/db/auth_utils.py +0 -152
- mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +0 -344
- mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +0 -75
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connection.py +0 -281
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +0 -1266
- {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/WHEEL +0 -0
- {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/entry_points.txt +0 -0
- {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/licenses/LICENSE +0 -0
- {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/top_level.txt +0 -0
mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_schema.py (new file)

@@ -0,0 +1,502 @@

```python
# Copyright 2025 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
from dataclasses import dataclass
from io import StringIO
from typing import Optional

import mlrun.common.schemas.model_monitoring as mm_schemas
import mlrun.errors
from mlrun.model_monitoring.db.tsdb.preaggregate import PreAggregateConfig
from mlrun.model_monitoring.db.tsdb.timescaledb.utils.timescaledb_query_builder import (
    TimescaleDBNaming,
)

_MODEL_MONITORING_SCHEMA = "mlrun_model_monitoring"

# TimescaleDB-specific constants
TIME_BUCKET_COLUMN = "time_bucket"

# Database schema constants
MODEL_ERROR_MAX_LENGTH = 1000
CUSTOM_METRICS_MAX_LENGTH = 1000
RESULT_EXTRA_DATA_MAX_LENGTH = 1000


def create_table_schemas(project: str) -> dict:
    """Create all TimescaleDB table schemas for a project.

    This consolidated function eliminates duplication across connector, operations, and test fixtures.

    Args:
        project: The project name for table creation

    Returns:
        Dictionary mapping TimescaleDBTables enum values to table schema objects
    """
    import mlrun

    schema = f"{_MODEL_MONITORING_SCHEMA}_{mlrun.mlconf.system_id}"
    return {
        mm_schemas.TimescaleDBTables.APP_RESULTS: AppResultTable(
            project=project, schema=schema
        ),
        mm_schemas.TimescaleDBTables.METRICS: Metrics(project=project, schema=schema),
        mm_schemas.TimescaleDBTables.PREDICTIONS: Predictions(
            project=project, schema=schema
        ),
        mm_schemas.TimescaleDBTables.ERRORS: Errors(project=project, schema=schema),
    }


class _TimescaleDBColumnType:
    """Represents a TimescaleDB column type with optional constraints."""

    def __init__(
        self, data_type: str, length: Optional[int] = None, nullable: bool = True
    ):
        self.data_type = data_type
        self.length = length
        self.nullable = nullable

    def __str__(self):
        if self.length is not None:
            return f"{self.data_type}({self.length})"
        else:
            return self.data_type


@dataclass
class TimescaleDBSchema:
    """
    A class to represent a hypertable schema in TimescaleDB. Using this schema, you can generate the relevant queries
    to create, insert, delete and query data from TimescaleDB. At the moment, there are 4 schemas: AppResultTable,
    Metrics, Predictions, and Errors.
    """

    def __init__(
        self,
        table_name: str,
        columns: dict[str, _TimescaleDBColumnType],
        time_column: str,
        project: str,
        schema: Optional[str] = None,
        chunk_time_interval: str = "1 day",
        indexes: Optional[list[str]] = None,
    ):
        self.table_name = f"{table_name}_{project.replace('-', '_')}"
        self.columns = columns
        self.time_column = time_column
        self.schema = schema or _MODEL_MONITORING_SCHEMA
        self.chunk_time_interval = chunk_time_interval
        self.indexes = indexes or []
        self.project = project

    def full_name(self) -> str:
        """Return the fully qualified table name (schema.table_name)."""
        return f"{self.schema}.{self.table_name}"

    def _create_table_query(self) -> str:
        """Create the base table SQL."""
        columns_def = ", ".join(
            f"{col} {col_type}" + ("" if col_type.nullable else " NOT NULL")
            for col, col_type in self.columns.items()
        )
        return f"CREATE TABLE IF NOT EXISTS {self.full_name()} ({columns_def});"

    def _create_hypertable_query(self) -> str:
        """Convert table to hypertable."""
        return (
            f"SELECT create_hypertable('{self.full_name()}', '{self.time_column}', "
            f"chunk_time_interval => INTERVAL '{self.chunk_time_interval}', if_not_exists => TRUE);"
        )

    def _create_indexes_query(self) -> list[str]:
        """Create indexes for the table."""
        queries = []
        for index_columns in self.indexes:
            index_name = f"idx_{self.table_name}_{index_columns.replace(',', '_').replace(' ', '_')}"
            queries.append(
                f"CREATE INDEX IF NOT EXISTS {index_name} "
                f"ON {self.full_name()} ({index_columns});"
            )
        return queries

    def _create_pre_aggregate_tables_query(
        self, config: PreAggregateConfig
    ) -> list[str]:
        """Create pre-aggregate tables for each interval."""
        queries = []

        for interval in config.aggregate_intervals:
            agg_table_name = TimescaleDBNaming.get_agg_table_name(
                self.table_name, interval
            )

            # Create aggregate table structure
            agg_columns = [f"{TIME_BUCKET_COLUMN} TIMESTAMPTZ NOT NULL"]

            # Add aggregated columns for numeric fields
            for col, col_type in self.columns.items():
                if col == self.time_column:
                    continue
                if col_type.data_type in ["DOUBLE PRECISION", "INTEGER", "BIGINT"]:
                    agg_columns.extend(
                        f"{func}_{col} {col_type}" for func in config.agg_functions
                    )
                else:
                    # For non-numeric columns, keep the original type for grouping
                    agg_columns.append(f"{col} {col_type}")

            create_agg_table = f"CREATE TABLE IF NOT EXISTS {self.schema}.{agg_table_name} ({', '.join(agg_columns)});"

            # Create hypertable for aggregate table
            create_agg_hypertable = (
                f"SELECT create_hypertable('{self.schema}.{agg_table_name}', "
                f"'{TIME_BUCKET_COLUMN}', chunk_time_interval => INTERVAL "
                f"'{self._get_chunk_interval_for_agg(interval)}', if_not_exists => TRUE);"
            )

            queries.extend([create_agg_table, create_agg_hypertable])

        return queries

    def _get_chunk_interval_for_agg(self, interval: str) -> str:
        """Get appropriate chunk interval for aggregate tables."""
        interval_to_chunk = {
            "10m": "1 hour",
            "1h": "1 day",
            "6h": "1 day",
            "1d": "7 days",
            "1w": "1 month",
            "1M": "3 months",
        }
        return interval_to_chunk.get(interval, "1 day")

    def _create_continuous_aggregates_query(
        self, config: PreAggregateConfig
    ) -> list[str]:
        """Create TimescaleDB continuous aggregates for pre-computation."""
        queries = []

        for interval in config.aggregate_intervals:
            cagg_name = TimescaleDBNaming.get_cagg_view_name(self.table_name, interval)

            # Build SELECT clause for continuous aggregate
            select_parts = [
                f"time_bucket(INTERVAL '{interval}', {self.time_column}) AS {TIME_BUCKET_COLUMN}"
            ]

            # Add aggregations for numeric columns
            for col, col_type in self.columns.items():
                if col == self.time_column:
                    continue
                if col_type.data_type in ["DOUBLE PRECISION", "INTEGER", "BIGINT"]:
                    for func in config.agg_functions:
                        if func == "count":
                            select_parts.append(f"COUNT({col}) AS {func}_{col}")
                        else:
                            select_parts.append(
                                f"{func.upper()}({col}) AS {func}_{col}"
                            )
                elif col in [
                    mm_schemas.WriterEvent.ENDPOINT_ID,
                    mm_schemas.WriterEvent.APPLICATION_NAME,
                    mm_schemas.MetricData.METRIC_NAME,
                    mm_schemas.ResultData.RESULT_NAME,
                ]:
                    select_parts.append(col)

            # Group by clause
            group_by_cols = [TIME_BUCKET_COLUMN]
            for col in self.columns:
                if col == self.time_column:
                    continue
                if col in [
                    mm_schemas.WriterEvent.ENDPOINT_ID,
                    mm_schemas.WriterEvent.APPLICATION_NAME,
                    mm_schemas.MetricData.METRIC_NAME,
                    mm_schemas.ResultData.RESULT_NAME,
                ]:
                    group_by_cols.append(col)

            create_cagg = (
                f"CREATE MATERIALIZED VIEW IF NOT EXISTS {self.schema}.{cagg_name} "
                f"WITH (timescaledb.continuous) "
                f"AS SELECT {', '.join(select_parts)} FROM {self.full_name()} "
                f"GROUP BY {', '.join(group_by_cols)} WITH NO DATA;"
            )

            queries.append(create_cagg)

        return queries

    def _create_retention_policies_query(self, config: PreAggregateConfig) -> list[str]:
        """Create retention policies for tables."""
        queries = []

        # Retention for main table
        if "raw" in config.retention_policy:
            queries.append(
                f"SELECT add_retention_policy('{self.full_name()}', INTERVAL "
                f"'{config.retention_policy['raw']}', if_not_exists => TRUE);"
            )

        # Retention for continuous aggregates
        for interval in config.aggregate_intervals:
            if interval in config.retention_policy:
                cagg_name = TimescaleDBNaming.get_cagg_view_name(
                    self.table_name, interval
                )
                queries.append(
                    f"SELECT add_retention_policy('{self.schema}.{cagg_name}', INTERVAL "
                    f"'{config.retention_policy[interval]}', if_not_exists => TRUE);"
                )

        return queries

    def drop_table_query(self) -> str:
        """Drop the main table."""
        return f"DROP TABLE IF EXISTS {self.full_name()} CASCADE;"

    def _get_records_query(
        self,
        start: datetime.datetime,
        end: datetime.datetime,
        columns_to_filter: Optional[list[str]] = None,
        filter_query: Optional[str] = None,
        interval: Optional[str] = None,
        limit: Optional[int] = None,
        agg_funcs: Optional[list] = None,
        order_by: Optional[str] = None,
        desc: Optional[bool] = None,
        use_pre_aggregates: bool = True,
        group_by: Optional[list[str]] = None,
        timestamp_column: Optional[str] = None,
    ) -> str:
        """Build query to get records from the table or its pre-aggregates."""

        # Determine table to query
        table_name = self.table_name
        time_col = timestamp_column or self.time_column

        if interval and agg_funcs and use_pre_aggregates:
            if timestamp_column and timestamp_column != self.time_column:
                raise mlrun.errors.MLRunInvalidArgumentError(
                    f"Cannot use custom timestamp_column='{timestamp_column}' with pre-aggregates. "
                    "Pre-aggregates are built on the table's default time column."
                )
            # Use continuous aggregate if available
            table_name = TimescaleDBNaming.get_cagg_view_name(self.table_name, interval)
            time_col = TIME_BUCKET_COLUMN

        with StringIO() as query:
            query.write("SELECT ")

            if columns_to_filter:
                if interval and agg_funcs and use_pre_aggregates:
                    modified_columns = []
                    for col in columns_to_filter:
                        if col == time_col:
                            modified_columns.append(TIME_BUCKET_COLUMN)
                        else:
                            # Use column name as-is - caller should provide correct pre-agg column names
                            modified_columns.append(col)
                    query.write(", ".join(modified_columns))
                else:
                    query.write(", ".join(columns_to_filter))
            else:
                query.write("*")

            query.write(f" FROM {self.schema}.{table_name}")

            # WHERE clause
            conditions = []
            if filter_query:
                conditions.append(filter_query)
            if start:
                conditions.append(f"{time_col} >= '{start}'")
            if end:
                conditions.append(f"{time_col} <= '{end}'")

            if conditions:
                query.write(" WHERE " + " AND ".join(conditions))

            # GROUP BY clause (must come before ORDER BY)
            if group_by:
                query.write(f" GROUP BY {', '.join(group_by)}")

            # ORDER BY clause (must come after GROUP BY)
            if order_by:
                direction = " DESC" if desc else " ASC"
                query.write(f" ORDER BY {order_by}{direction}")

            if limit:
                query.write(f" LIMIT {limit}")

            query.write(";")

            return query.getvalue()
```
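The base class above only builds SQL strings; nothing runs against the database until a connection executes them. As a rough illustration, here is a minimal, dependency-free sketch of the statement shapes these builders produce; the schema, table, and column names below are invented for the example, not mlrun's actual identifiers:

```python
# Hedged sketch: reproduces the DDL and SELECT shapes built above for a
# hypothetical hypertable. All identifiers here are illustrative; the real
# ones come from the mm_schemas enums and mlconf.system_id.
schema = "mlrun_model_monitoring_someid"
table = f"{schema}.predictions_my_project"
time_col = "end_infer_time"

create_table = (
    f"CREATE TABLE IF NOT EXISTS {table} "
    f"({time_col} TIMESTAMPTZ, latency DOUBLE PRECISION, endpoint_id VARCHAR(64));"
)
create_hypertable = (
    f"SELECT create_hypertable('{table}', '{time_col}', "
    "chunk_time_interval => INTERVAL '1 day', if_not_exists => TRUE);"
)
# The shape of a _get_records_query()-style read over a time window:
select = (
    f"SELECT latency, endpoint_id FROM {table} "
    f"WHERE {time_col} >= '2025-01-01 00:00:00' AND {time_col} <= '2025-01-02 00:00:00' "
    f"ORDER BY {time_col} DESC LIMIT 100;"
)
print(create_table, create_hypertable, select, sep="\n")
```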
timescaledb_schema.py (continued):

```python
@dataclass
class AppResultTable(TimescaleDBSchema):
    """Schema for application results table."""

    def __init__(self, project: str, schema: Optional[str] = None):
        table_name = mm_schemas.TimescaleDBTables.APP_RESULTS
        columns = {
            mm_schemas.WriterEvent.END_INFER_TIME: _TimescaleDBColumnType(
                "TIMESTAMPTZ"
            ),
            mm_schemas.WriterEvent.START_INFER_TIME: _TimescaleDBColumnType(
                "TIMESTAMPTZ"
            ),
            mm_schemas.ResultData.RESULT_VALUE: _TimescaleDBColumnType(
                "DOUBLE PRECISION"
            ),
            mm_schemas.ResultData.RESULT_STATUS: _TimescaleDBColumnType("INTEGER"),
            mm_schemas.ResultData.RESULT_EXTRA_DATA: _TimescaleDBColumnType(
                "VARCHAR", RESULT_EXTRA_DATA_MAX_LENGTH
            ),
            mm_schemas.WriterEvent.ENDPOINT_ID: _TimescaleDBColumnType("VARCHAR", 64),
            mm_schemas.WriterEvent.APPLICATION_NAME: _TimescaleDBColumnType(
                "VARCHAR", 64
            ),
            mm_schemas.ResultData.RESULT_NAME: _TimescaleDBColumnType("VARCHAR", 64),
            mm_schemas.ResultData.RESULT_KIND: _TimescaleDBColumnType("INTEGER"),
        }
        indexes = [
            mm_schemas.WriterEvent.ENDPOINT_ID,
            f"{mm_schemas.WriterEvent.APPLICATION_NAME}, {mm_schemas.ResultData.RESULT_NAME}",
            mm_schemas.WriterEvent.END_INFER_TIME,
        ]
        super().__init__(
            table_name=table_name,
            columns=columns,
            time_column=mm_schemas.WriterEvent.END_INFER_TIME,
            schema=schema,
            project=project,
            indexes=indexes,
        )


@dataclass
class Metrics(TimescaleDBSchema):
    """Schema for metrics table."""

    def __init__(self, project: str, schema: Optional[str] = None):
        table_name = mm_schemas.TimescaleDBTables.METRICS
        columns = {
            mm_schemas.WriterEvent.END_INFER_TIME: _TimescaleDBColumnType(
                "TIMESTAMPTZ"
            ),
            mm_schemas.WriterEvent.START_INFER_TIME: _TimescaleDBColumnType(
                "TIMESTAMPTZ"
            ),
            mm_schemas.MetricData.METRIC_VALUE: _TimescaleDBColumnType(
                "DOUBLE PRECISION"
            ),
            mm_schemas.WriterEvent.ENDPOINT_ID: _TimescaleDBColumnType("VARCHAR", 64),
            mm_schemas.WriterEvent.APPLICATION_NAME: _TimescaleDBColumnType(
                "VARCHAR", 64
            ),
            mm_schemas.MetricData.METRIC_NAME: _TimescaleDBColumnType("VARCHAR", 64),
        }
        indexes = [
            mm_schemas.WriterEvent.ENDPOINT_ID,
            f"{mm_schemas.WriterEvent.APPLICATION_NAME}, {mm_schemas.MetricData.METRIC_NAME}",
            mm_schemas.WriterEvent.END_INFER_TIME,
            f"{mm_schemas.WriterEvent.END_INFER_TIME}, {mm_schemas.WriterEvent.ENDPOINT_ID},\
                {mm_schemas.WriterEvent.APPLICATION_NAME}",
            f"{mm_schemas.WriterEvent.APPLICATION_NAME}, {mm_schemas.WriterEvent.END_INFER_TIME}",
        ]
        super().__init__(
            table_name=table_name,
            columns=columns,
            time_column=mm_schemas.WriterEvent.END_INFER_TIME,
            schema=schema,
            project=project,
            indexes=indexes,
        )


@dataclass
class Predictions(TimescaleDBSchema):
    """Schema for predictions table."""

    def __init__(self, project: str, schema: Optional[str] = None):
        table_name = mm_schemas.TimescaleDBTables.PREDICTIONS
        columns = {
            mm_schemas.WriterEvent.END_INFER_TIME: _TimescaleDBColumnType(
                "TIMESTAMPTZ"
            ),
            mm_schemas.EventFieldType.LATENCY: _TimescaleDBColumnType(
                "DOUBLE PRECISION"
            ),
            mm_schemas.EventKeyMetrics.CUSTOM_METRICS: _TimescaleDBColumnType(
                "VARCHAR", CUSTOM_METRICS_MAX_LENGTH
            ),
            mm_schemas.EventFieldType.ESTIMATED_PREDICTION_COUNT: _TimescaleDBColumnType(
                "DOUBLE PRECISION"
            ),
            mm_schemas.EventFieldType.EFFECTIVE_SAMPLE_COUNT: _TimescaleDBColumnType(
                "INTEGER"
            ),
            mm_schemas.WriterEvent.ENDPOINT_ID: _TimescaleDBColumnType("VARCHAR", 64),
        }

        indexes = [
            mm_schemas.WriterEvent.ENDPOINT_ID,
            mm_schemas.WriterEvent.END_INFER_TIME,
            f"{mm_schemas.WriterEvent.END_INFER_TIME}, {mm_schemas.WriterEvent.ENDPOINT_ID}",
        ]
        super().__init__(
            table_name=table_name,
            columns=columns,
            time_column=mm_schemas.WriterEvent.END_INFER_TIME,
            schema=schema,
            project=project,
            indexes=indexes,
        )


@dataclass
class Errors(TimescaleDBSchema):
    """Schema for errors table."""

    def __init__(self, project: str, schema: Optional[str] = None):
        table_name = mm_schemas.TimescaleDBTables.ERRORS
        columns = {
            mm_schemas.EventFieldType.TIME: _TimescaleDBColumnType("TIMESTAMPTZ"),
            mm_schemas.EventFieldType.MODEL_ERROR: _TimescaleDBColumnType(
                "VARCHAR", MODEL_ERROR_MAX_LENGTH
            ),
            mm_schemas.WriterEvent.ENDPOINT_ID: _TimescaleDBColumnType("VARCHAR", 64),
            mm_schemas.EventFieldType.ERROR_TYPE: _TimescaleDBColumnType("VARCHAR", 64),
        }
        indexes = [
            mm_schemas.WriterEvent.ENDPOINT_ID,
            mm_schemas.EventFieldType.ERROR_TYPE,
            mm_schemas.EventFieldType.TIME,
        ]
        super().__init__(
            table_name=table_name,
            columns=columns,
            time_column=mm_schemas.EventFieldType.TIME,
            schema=schema,
            project=project,
            indexes=indexes,
        )
```
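With the four table schemas defined, `create_table_schemas` at the top of the file is the single entry point that builds them per project. A hedged usage sketch, assuming this mlrun build is installed and `mlrun.mlconf.system_id` is configured:

```python
# Hedged sketch, assuming an mlrun 1.11.0rcX environment; "my-project" is illustrative.
import mlrun.common.schemas.model_monitoring as mm_schemas
from mlrun.model_monitoring.db.tsdb.timescaledb.timescaledb_schema import (
    create_table_schemas,
)

tables = create_table_schemas(project="my-project")
predictions = tables[mm_schemas.TimescaleDBTables.PREDICTIONS]

# Dashes in the project name become underscores in the table name,
# e.g. mlrun_model_monitoring_<system_id>.predictions_my_project
print(predictions.full_name())
print(predictions.drop_table_query())  # DROP TABLE IF EXISTS ... CASCADE;
```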
mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_stream.py (new file)

@@ -0,0 +1,163 @@

```python
# Copyright 2025 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mlrun
import mlrun.common.schemas.model_monitoring as mm_schemas
import mlrun.model_monitoring.db.tsdb.timescaledb.timescaledb_schema as timescaledb_schema
from mlrun.datastore.datastore_profile import DatastoreProfilePostgreSQL
from mlrun.model_monitoring.db.tsdb.timescaledb.timescaledb_connection import (
    TimescaleDBConnection,
)


class TimescaleDBStreamProcessor:
    """
    Handles stream processing operations for TimescaleDB TSDB connector.

    This class implements stream graph setup methods:
    - Monitoring stream steps configuration
    - Error handling setup
    - Real-time data ingestion pipeline

    Each instance creates its own TimescaleDBConnection that shares the global connection pool.
    """

    def __init__(
        self,
        project: str,
        profile: DatastoreProfilePostgreSQL,
        connection: TimescaleDBConnection,
    ):
        """
        Initialize stream handler with a shared connection.

        :param project: The project name
        :param profile: Datastore profile for connection (used for table initialization)
        :param connection: Shared TimescaleDBConnection instance
        """
        self.project = project
        self.profile = profile

        # Use the injected shared connection
        self._connection = connection

        # Initialize table schemas for stream operations
        self._init_tables()

    def _init_tables(self) -> None:
        """Initialize TimescaleDB table schemas for stream operations."""
        schema_name = (
            f"{timescaledb_schema._MODEL_MONITORING_SCHEMA}_{mlrun.mlconf.system_id}"
        )

        self.tables = {
            mm_schemas.TimescaleDBTables.PREDICTIONS: timescaledb_schema.Predictions(
                project=self.project, schema=schema_name
            ),
            mm_schemas.TimescaleDBTables.ERRORS: timescaledb_schema.Errors(
                project=self.project, schema=schema_name
            ),
        }

    def apply_monitoring_stream_steps(self, graph, **kwargs) -> None:
        """
        Apply TimescaleDB steps on the monitoring graph for real-time data ingestion.

        Sets up the stream processing pipeline to write prediction latency and
        custom metrics to TimescaleDB hypertables using the TimescaleDBTarget.

        :param graph: The stream processing graph to modify
        :param kwargs: Additional configuration parameters
        """

        def apply_process_before_timescaledb():
            """Add preprocessing step for TimescaleDB data format."""
            graph.add_step(
                "mlrun.model_monitoring.db.tsdb.timescaledb.timescaledb_stream_graph_steps.ProcessBeforeTimescaleDB",
                name="ProcessBeforeTimescaleDB",
                after="FilterNOP",
            )

        def apply_timescaledb_target(name: str, after: str):
            """Add TimescaleDB target for writing predictions data."""
            predictions_table = self.tables[mm_schemas.TimescaleDBTables.PREDICTIONS]

            graph.add_step(
                "mlrun.datastore.storeytargets.TimescaleDBStoreyTarget",
                name=name,
                after=after,
                url=f"ds://{self.profile.name}",
                time_col=mm_schemas.WriterEvent.END_INFER_TIME,
                table=predictions_table.full_name(),
                columns=[
                    mm_schemas.EventFieldType.LATENCY,
                    mm_schemas.EventKeyMetrics.CUSTOM_METRICS,
                    mm_schemas.EventFieldType.ESTIMATED_PREDICTION_COUNT,
                    mm_schemas.EventFieldType.EFFECTIVE_SAMPLE_COUNT,
                    mm_schemas.WriterEvent.ENDPOINT_ID,
                ],
                max_events=kwargs.get("tsdb_batching_max_events", 1000),
                flush_after_seconds=kwargs.get("tsdb_batching_timeout_secs", 30),
            )

        # Apply the processing steps
        apply_process_before_timescaledb()
        apply_timescaledb_target(
            name="TimescaleDBTarget",
            after="ProcessBeforeTimescaleDB",
        )
```
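The topology this method wires is easiest to see against a stand-in graph. The `RecordingGraph` class below is invented for illustration (the real `graph` comes from mlrun's serving flow); it just records `add_step` calls to show the resulting chain `FilterNOP → ProcessBeforeTimescaleDB → TimescaleDBTarget`:

```python
# Hedged sketch: RecordingGraph is an invented stand-in for mlrun's serving graph,
# used only to visualize the step topology that apply_monitoring_stream_steps() builds.
class RecordingGraph:
    def __init__(self):
        self.steps = []

    def add_step(self, step_class, name, after, **options):
        self.steps.append((after, name))


graph = RecordingGraph()
# The same two calls the method issues (target options trimmed for brevity):
graph.add_step(
    "mlrun.model_monitoring.db.tsdb.timescaledb.timescaledb_stream_graph_steps"
    ".ProcessBeforeTimescaleDB",
    name="ProcessBeforeTimescaleDB",
    after="FilterNOP",
)
graph.add_step(
    "mlrun.datastore.storeytargets.TimescaleDBStoreyTarget",
    name="TimescaleDBTarget",
    after="ProcessBeforeTimescaleDB",
    max_events=1000,
    flush_after_seconds=30,
)
for after, name in graph.steps:
    print(f"{after} -> {name}")
# FilterNOP -> ProcessBeforeTimescaleDB
# ProcessBeforeTimescaleDB -> TimescaleDBTarget
```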
timescaledb_stream.py (continued):

```python
    def handle_model_error(
        self,
        graph,
        tsdb_batching_max_events: int = 1000,
        tsdb_batching_timeout_secs: int = 30,
        **kwargs,
    ) -> None:
        """
        Add error handling branch to the stream processing graph.

        Processes model errors and writes them to the TimescaleDB errors table
        for monitoring and alerting purposes.

        :param graph: The stream processing graph to modify
        :param tsdb_batching_max_events: Maximum events per batch
        :param tsdb_batching_timeout_secs: Batch timeout in seconds
        :param kwargs: Additional configuration parameters
        """

        errors_table = self.tables[mm_schemas.TimescaleDBTables.ERRORS]

        # Add error extraction step
        graph.add_step(
            "mlrun.model_monitoring.db.tsdb.timescaledb.timescaledb_stream_graph_steps.TimescaleDBErrorExtractor",
            name="error_extractor",
            after="ForwardError",
        )

        # Add TimescaleDB target for error data
        graph.add_step(
            "mlrun.datastore.storeytargets.TimescaleDBStoreyTarget",
            name="timescaledb_error",
            after="error_extractor",
            url=f"ds://{self.profile.name}",
            time_col=mm_schemas.EventFieldType.TIME,
            table=errors_table.full_name(),
            columns=[
                mm_schemas.EventFieldType.MODEL_ERROR,
                mm_schemas.WriterEvent.ENDPOINT_ID,
                mm_schemas.EventFieldType.ERROR_TYPE,
            ],
            max_events=tsdb_batching_max_events,
            flush_after_seconds=tsdb_batching_timeout_secs,
        )
```
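Under the same stand-in assumption, `handle_model_error` wires a parallel branch, `ForwardError → error_extractor → timescaledb_error`, batched with the same defaults (flush at 1000 events or after 30 seconds, whichever comes first):

```python
# Hedged sketch, same invented RecordingGraph stand-in as in the previous example.
class RecordingGraph:
    def __init__(self):
        self.steps = []

    def add_step(self, step_class, name, after, **options):
        self.steps.append((after, name))


graph = RecordingGraph()
graph.add_step(
    "mlrun.model_monitoring.db.tsdb.timescaledb.timescaledb_stream_graph_steps"
    ".TimescaleDBErrorExtractor",
    name="error_extractor",
    after="ForwardError",
)
graph.add_step(
    "mlrun.datastore.storeytargets.TimescaleDBStoreyTarget",
    name="timescaledb_error",
    after="error_extractor",
    max_events=1000,  # tsdb_batching_max_events default
    flush_after_seconds=30,  # tsdb_batching_timeout_secs default
)
for after, name in graph.steps:
    print(f"{after} -> {name}")
# ForwardError -> error_extractor
# error_extractor -> timescaledb_error
```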