mlrun 1.8.0rc30__py3-none-any.whl → 1.8.0rc31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mlrun might be problematic; see the registry's advisory for this release for more details.

Files changed (33):
  1. mlrun/__init__.py +2 -35
  2. mlrun/api/schemas/__init__.py +1 -6
  3. mlrun/common/runtimes/constants.py +4 -0
  4. mlrun/common/schemas/__init__.py +0 -2
  5. mlrun/common/schemas/model_monitoring/__init__.py +0 -2
  6. mlrun/common/schemas/model_monitoring/constants.py +1 -6
  7. mlrun/common/schemas/model_monitoring/grafana.py +17 -11
  8. mlrun/config.py +9 -36
  9. mlrun/datastore/storeytargets.py +20 -3
  10. mlrun/model_monitoring/applications/base.py +55 -40
  11. mlrun/model_monitoring/applications/results.py +2 -2
  12. mlrun/model_monitoring/controller.py +4 -3
  13. mlrun/model_monitoring/db/tsdb/__init__.py +9 -5
  14. mlrun/model_monitoring/db/tsdb/base.py +60 -39
  15. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +117 -52
  16. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +140 -14
  17. mlrun/model_monitoring/helpers.py +16 -15
  18. mlrun/model_monitoring/stream_processing.py +6 -13
  19. mlrun/projects/pipelines.py +11 -3
  20. mlrun/projects/project.py +84 -107
  21. mlrun/serving/states.py +1 -1
  22. mlrun/serving/v2_serving.py +20 -10
  23. mlrun/utils/helpers.py +1 -1
  24. mlrun/utils/logger.py +13 -10
  25. mlrun/utils/notifications/notification_pusher.py +24 -0
  26. mlrun/utils/regex.py +1 -0
  27. mlrun/utils/version/version.json +2 -2
  28. {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc31.dist-info}/METADATA +2 -2
  29. {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc31.dist-info}/RECORD +33 -33
  30. {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc31.dist-info}/LICENSE +0 -0
  31. {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc31.dist-info}/WHEEL +0 -0
  32. {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc31.dist-info}/entry_points.txt +0 -0
  33. {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc31.dist-info}/top_level.txt +0 -0
mlrun/__init__.py CHANGED
@@ -26,7 +26,6 @@ __all__ = [
26
26
  "VolumeMount",
27
27
  ]
28
28
 
29
- import collections
30
29
  from os import environ, path
31
30
  from typing import Optional
32
31
 
@@ -215,40 +214,8 @@ def set_env_from_file(env_file: str, return_dict: bool = False) -> Optional[dict
215
214
  if None in env_vars.values():
216
215
  raise MLRunInvalidArgumentError("env file lines must be in the form key=value")
217
216
 
218
- ordered_env_vars = order_env_vars(env_vars)
219
- for key, value in ordered_env_vars.items():
217
+ for key, value in env_vars.items():
220
218
  environ[key] = value
221
219
 
222
220
  mlconf.reload() # reload mlrun configuration
223
- return ordered_env_vars if return_dict else None
224
-
225
-
226
- def order_env_vars(env_vars: dict[str, str]) -> dict[str, str]:
227
- """
228
- Order and process environment variables by first handling specific ordered keys,
229
- then processing the remaining keys in the given dictionary.
230
-
231
- The function ensures that environment variables defined in the `ordered_keys` list
232
- are added to the result dictionary first. Any other environment variables from
233
- `env_vars` are then added in the order they appear in the input dictionary.
234
-
235
- :param env_vars: A dictionary where each key is the name of an environment variable (str),
236
- and each value is the corresponding environment variable value (str).
237
- :return: A dictionary with the processed environment variables, ordered with the specific
238
- keys first, followed by the rest in their original order.
239
- """
240
- ordered_keys = mlconf.get_ordered_keys()
241
-
242
- ordered_env_vars = collections.OrderedDict()
243
-
244
- # First, add the ordered keys to the dictionary
245
- for key in ordered_keys:
246
- if key in env_vars:
247
- ordered_env_vars[key] = env_vars[key]
248
-
249
- # Then, add the remaining keys (those not in ordered_keys)
250
- for key, value in env_vars.items():
251
- if key not in ordered_keys:
252
- ordered_env_vars[key] = value
253
-
254
- return ordered_env_vars
221
+ return env_vars if return_dict else None
@@ -193,9 +193,7 @@ FeatureValues = DeprecationHelper(mlrun.common.schemas.FeatureValues)
193
193
  GrafanaColumn = DeprecationHelper(
194
194
  mlrun.common.schemas.model_monitoring.grafana.GrafanaColumn
195
195
  )
196
- GrafanaDataPoint = DeprecationHelper(
197
- mlrun.common.schemas.model_monitoring.grafana.GrafanaDataPoint
198
- )
196
+
199
197
  GrafanaNumberColumn = DeprecationHelper(
200
198
  mlrun.common.schemas.model_monitoring.grafana.GrafanaNumberColumn
201
199
  )
@@ -205,9 +203,6 @@ GrafanaStringColumn = DeprecationHelper(
205
203
  GrafanaTable = DeprecationHelper(
206
204
  mlrun.common.schemas.model_monitoring.grafana.GrafanaTable
207
205
  )
208
- GrafanaTimeSeriesTarget = DeprecationHelper(
209
- mlrun.common.schemas.model_monitoring.grafana.GrafanaTimeSeriesTarget
210
- )
211
206
  ModelEndpoint = DeprecationHelper(mlrun.common.schemas.ModelEndpoint)
212
207
  ModelEndpointList = DeprecationHelper(mlrun.common.schemas.ModelEndpointList)
213
208
  ModelEndpointMetadata = DeprecationHelper(mlrun.common.schemas.ModelEndpointMetadata)
@@ -194,6 +194,10 @@ class RunStates:
194
194
  # TODO: add aborting state once we have it
195
195
  ]
196
196
 
197
+ @staticmethod
198
+ def notification_states():
199
+ return RunStates.terminal_states() + [RunStates.running]
200
+
197
201
  @staticmethod
198
202
  def run_state_to_pipeline_run_status(run_state: str):
199
203
  if not run_state:
@@ -140,11 +140,9 @@ from .model_monitoring import (
140
140
  FeatureSetFeatures,
141
141
  FeatureValues,
142
142
  GrafanaColumn,
143
- GrafanaDataPoint,
144
143
  GrafanaNumberColumn,
145
144
  GrafanaStringColumn,
146
145
  GrafanaTable,
147
- GrafanaTimeSeriesTarget,
148
146
  ModelEndpoint,
149
147
  ModelEndpointCreationStrategy,
150
148
  ModelEndpointList,
@@ -51,11 +51,9 @@ from .constants import (
51
51
  from .grafana import (
52
52
  GrafanaColumn,
53
53
  GrafanaColumnType,
54
- GrafanaDataPoint,
55
54
  GrafanaNumberColumn,
56
55
  GrafanaStringColumn,
57
56
  GrafanaTable,
58
- GrafanaTimeSeriesTarget,
59
57
  )
60
58
  from .model_endpoints import (
61
59
  Features,
@@ -250,11 +250,6 @@ class TSDBTarget(MonitoringStrEnum):
250
250
  TDEngine = "tdengine"
251
251
 
252
252
 
253
- class DefaultProfileName(StrEnum):
254
- STREAM = "mm-infra-stream"
255
- TSDB = "mm-infra-tsdb"
256
-
257
-
258
253
  class ProjectSecretKeys:
259
254
  ACCESS_KEY = "MODEL_MONITORING_ACCESS_KEY"
260
255
  TSDB_PROFILE_NAME = "TSDB_PROFILE_NAME"
@@ -473,8 +468,8 @@ FQN_REGEX = re.compile(FQN_PATTERN)
473
468
 
474
469
  # refer to `mlrun.utils.regex.project_name`
475
470
  PROJECT_PATTERN = r"^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
476
-
477
471
  MODEL_ENDPOINT_ID_PATTERN = r"^[a-zA-Z0-9_-]+$"
472
+ RESULT_NAME_PATTERN = r"[a-zA-Z_][a-zA-Z0-9_]*"
478
473
 
479
474
  INTERSECT_DICT_KEYS = {
480
475
  ModelEndpointMonitoringMetricType.METRIC: "intersect_metrics",
@@ -46,14 +46,20 @@ class GrafanaTable(BaseModel):
46
46
  self.rows.append(list(args))
47
47
 
48
48
 
49
- class GrafanaDataPoint(BaseModel):
50
- value: float
51
- timestamp: int # Unix timestamp in milliseconds
52
-
53
-
54
- class GrafanaTimeSeriesTarget(BaseModel):
55
- target: str
56
- datapoints: list[tuple[float, int]] = []
57
-
58
- def add_data_point(self, data_point: GrafanaDataPoint):
59
- self.datapoints.append((data_point.value, data_point.timestamp))
49
+ class GrafanaModelEndpointsTable(GrafanaTable):
50
+ def __init__(self):
51
+ columns = self._init_columns()
52
+ super().__init__(columns=columns)
53
+
54
+ @staticmethod
55
+ def _init_columns():
56
+ return [
57
+ GrafanaColumn(text="endpoint_id", type=GrafanaColumnType.STRING),
58
+ GrafanaColumn(text="endpoint_name", type=GrafanaColumnType.STRING),
59
+ GrafanaColumn(text="endpoint_function", type=GrafanaColumnType.STRING),
60
+ GrafanaColumn(text="endpoint_model", type=GrafanaColumnType.STRING),
61
+ GrafanaColumn(text="endpoint_model_class", type=GrafanaColumnType.STRING),
62
+ GrafanaColumn(text="error_count", type=GrafanaColumnType.NUMBER),
63
+ GrafanaColumn(text="drift_status", type=GrafanaColumnType.NUMBER),
64
+ GrafanaColumn(text="sampling_percentage", type=GrafanaColumnType.NUMBER),
65
+ ]
mlrun/config.py CHANGED
@@ -1366,35 +1366,6 @@ class Config:
1366
1366
  ver in mlrun.mlconf.ce.mode for ver in ["lite", "full"]
1367
1367
  )
1368
1368
 
1369
- def get_s3_storage_options(self) -> dict[str, typing.Any]:
1370
- """
1371
- Generate storage options dictionary as required for handling S3 path in fsspec. The model monitoring stream
1372
- graph uses this method for generating the storage options for S3 parquet target path.
1373
- :return: A storage options dictionary in which each key-value pair represents a particular configuration,
1374
- such as endpoint_url or aws access key.
1375
- """
1376
- key = mlrun.get_secret_or_env("AWS_ACCESS_KEY_ID")
1377
- secret = mlrun.get_secret_or_env("AWS_SECRET_ACCESS_KEY")
1378
-
1379
- force_non_anonymous = mlrun.get_secret_or_env("S3_NON_ANONYMOUS")
1380
- profile = mlrun.get_secret_or_env("AWS_PROFILE")
1381
-
1382
- storage_options = dict(
1383
- anon=not (force_non_anonymous or (key and secret)),
1384
- key=key,
1385
- secret=secret,
1386
- )
1387
-
1388
- endpoint_url = mlrun.get_secret_or_env("S3_ENDPOINT_URL")
1389
- if endpoint_url:
1390
- client_kwargs = {"endpoint_url": endpoint_url}
1391
- storage_options["client_kwargs"] = client_kwargs
1392
-
1393
- if profile:
1394
- storage_options["profile"] = profile
1395
-
1396
- return storage_options
1397
-
1398
1369
  def is_explicit_ack_enabled(self) -> bool:
1399
1370
  return self.httpdb.nuclio.explicit_ack == "enabled" and (
1400
1371
  not self.nuclio_version
@@ -1402,13 +1373,6 @@ class Config:
1402
1373
  >= semver.VersionInfo.parse("1.12.10")
1403
1374
  )
1404
1375
 
1405
- @staticmethod
1406
- def get_ordered_keys():
1407
- # Define the keys to process first
1408
- return [
1409
- "MLRUN_HTTPDB__HTTP__VERIFY" # Ensure this key is processed first for proper connection setup
1410
- ]
1411
-
1412
1376
 
1413
1377
  # Global configuration
1414
1378
  config = Config.from_dict(default_config)
@@ -1626,6 +1590,15 @@ def read_env(env=None, prefix=env_prefix):
1626
1590
  # The default function pod resource values are of type str; however, when reading from environment variable numbers,
1627
1591
  # it converts them to type int if contains only number, so we want to convert them to str.
1628
1592
  _convert_resources_to_str(config)
1593
+
1594
+ # If the environment variable MLRUN_HTTPDB__HTTP__VERIFY is set, we ensure SSL verification settings take precedence
1595
+ # by moving the 'httpdb' configuration to the beginning of the config dictionary.
1596
+ # This ensures that SSL verification is applied before other settings.
1597
+ if "MLRUN_HTTPDB__HTTP__VERIFY" in env:
1598
+ httpdb = config.pop("httpdb", None)
1599
+ if httpdb:
1600
+ config = {"httpdb": httpdb, **config}
1601
+
1629
1602
  return config
1630
1603
 
1631
1604
 
@@ -42,9 +42,21 @@ def get_url_and_storage_options(path, external_storage_options=None):
42
42
 
43
43
 
44
44
  class TDEngineStoreyTarget(storey.TDEngineTarget):
45
- def __init__(self, *args, **kwargs):
46
- kwargs["url"] = mlrun.model_monitoring.helpers.get_tsdb_connection_string()
47
- super().__init__(*args, **kwargs)
45
+ def __init__(self, *args, url: str, **kwargs):
46
+ if url.startswith("ds://"):
47
+ datastore_profile = (
48
+ mlrun.datastore.datastore_profile.datastore_profile_read(url)
49
+ )
50
+ if not isinstance(
51
+ datastore_profile,
52
+ mlrun.datastore.datastore_profile.TDEngineDatastoreProfile,
53
+ ):
54
+ raise ValueError(
55
+ f"Unexpected datastore profile type:{datastore_profile.type}."
56
+ "Only TDEngineDatastoreProfile is supported"
57
+ )
58
+ url = datastore_profile.dsn()
59
+ super().__init__(*args, url=url, **kwargs)
48
60
 
49
61
 
50
62
  class StoreyTargetUtils:
@@ -69,7 +81,12 @@ class StoreyTargetUtils:
69
81
 
70
82
  class ParquetStoreyTarget(storey.ParquetTarget):
71
83
  def __init__(self, *args, **kwargs):
84
+ alt_key_name = kwargs.pop("alternative_v3io_access_key", None)
72
85
  args, kwargs = StoreyTargetUtils.process_args_and_kwargs(args, kwargs)
86
+ storage_options = kwargs.get("storage_options", {})
87
+ if storage_options and storage_options.get("v3io_access_key") and alt_key_name:
88
+ if alt_key := mlrun.get_secret_or_env(alt_key_name):
89
+ storage_options["v3io_access_key"] = alt_key
73
90
  super().__init__(*args, **kwargs)
74
91
 
75
92
 
@@ -95,8 +95,8 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
95
95
  sample_data: Optional[pd.DataFrame] = None,
96
96
  reference_data: Optional[pd.DataFrame] = None,
97
97
  endpoints: Optional[list[tuple[str, str]]] = None,
98
- start: Optional[datetime] = None,
99
- end: Optional[datetime] = None,
98
+ start: Optional[str] = None,
99
+ end: Optional[str] = None,
100
100
  base_period: Optional[int] = None,
101
101
  ):
102
102
  """
@@ -124,7 +124,6 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
124
124
  return self.do_tracking(monitoring_context)
125
125
 
126
126
  if endpoints is not None:
127
- start, end = self._validate_times(start, end, base_period)
128
127
  for window_start, window_end in self._window_generator(
129
128
  start, end, base_period
130
129
  ):
@@ -137,43 +136,40 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
137
136
  mm_constants.ApplicationEvent.END_INFER_TIME: window_end,
138
137
  }
139
138
  )
140
- context.log_result(
141
- f"{endpoint_name}_{window_start.isoformat()}_{window_end.isoformat()}",
142
- result,
139
+ result_key = (
140
+ f"{endpoint_name}_{window_start.isoformat()}_{window_end.isoformat()}"
141
+ if window_start and window_end
142
+ else endpoint_name
143
143
  )
144
+ context.log_result(result_key, result)
144
145
  else:
145
146
  return call_do_tracking()
146
147
 
147
- @staticmethod
148
- def _validate_times(
149
- start: Optional[datetime],
150
- end: Optional[datetime],
151
- base_period: Optional[int],
152
- ) -> tuple[datetime, datetime]:
153
- if (start is None) or (end is None):
154
- raise mlrun.errors.MLRunValueError(
155
- "When `endpoint_names` is provided, you must also pass the start and end times"
156
- )
157
- if (base_period is not None) and not (
158
- isinstance(base_period, int) and base_period > 0
159
- ):
160
- raise mlrun.errors.MLRunValueError(
161
- "`base_period` must be a nonnegative integer - the number of minutes in a monitoring window"
162
- )
163
- return start, end
164
-
165
148
  @staticmethod
166
149
  def _window_generator(
167
- start: datetime, end: datetime, base_period: Optional[int]
168
- ) -> Iterator[tuple[datetime, datetime]]:
150
+ start: Optional[str], end: Optional[str], base_period: Optional[int]
151
+ ) -> Iterator[tuple[Optional[datetime], Optional[datetime]]]:
152
+ if start is None or end is None:
153
+ # A single window based on the `sample_data` input - see `_handler`.
154
+ yield None, None
155
+ return
156
+
157
+ start_dt = datetime.fromisoformat(start)
158
+ end_dt = datetime.fromisoformat(end)
159
+
169
160
  if base_period is None:
170
- yield start, end
161
+ yield start_dt, end_dt
171
162
  return
172
163
 
164
+ if not isinstance(base_period, int) or base_period <= 0:
165
+ raise mlrun.errors.MLRunValueError(
166
+ "`base_period` must be a nonnegative integer - the number of minutes in a monitoring window"
167
+ )
168
+
173
169
  window_length = timedelta(minutes=base_period)
174
- current_start_time = start
175
- while current_start_time < end:
176
- current_end_time = min(current_start_time + window_length, end)
170
+ current_start_time = start_dt
171
+ while current_start_time < end_dt:
172
+ current_end_time = min(current_start_time + window_length, end_dt)
177
173
  yield current_start_time, current_end_time
178
174
  current_start_time = current_end_time
179
175
 
@@ -369,13 +365,25 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
369
365
  :param requirements: List of Python requirements to be installed in the image.
370
366
  :param requirements_file: Path to a Python requirements file to be installed in the image.
371
367
  :param endpoints: A list of tuples of the model endpoint (name, uid) to get the data from.
372
- If provided, you have to provide also the start and end times of the data to analyze.
373
- :param start: The start time of the sample data.
374
- :param end: The end time of the sample data.
368
+ If provided, and ``sample_data`` is not, you have to provide also the ``start`` and
369
+ ``end`` times of the data to analyze from the model endpoints.
370
+ :param start: The start time of the endpoint's data, not included.
371
+ If you want the model endpoint's data at ``start`` included, you need to subtract a
372
+ small ``datetime.timedelta`` from it.
373
+ :param end: The end time of the endpoint's data, included.
374
+ Please note: when ``start`` and ``end`` are set, they create a left-open time interval
375
+ ("window") :math:`(\\text{start}, \\text{end}]` that excludes the endpoint's data at
376
+ ``start`` and includes the data at ``end``:
377
+ :math:`\\text{start} < t \\leq \\text{end}`, :math:`t` is the time taken in the
378
+ window's data.
375
379
  :param base_period: The window length in minutes. If ``None``, the whole window from ``start`` to ``end``
376
380
  is taken. If an integer is specified, the application is run from ``start`` to ``end``
377
381
  in ``base_period`` length windows, except for the last window that ends at ``end`` and
378
- therefore may be shorter.
382
+ therefore may be shorter:
383
+ :math:`(\\text{start}, \\text{start} + \\text{base_period}],
384
+ (\\text{start} + \\text{base_period}, \\text{start} + 2\\cdot\\text{base_period}],
385
+ ..., (\\text{start} + m\\cdot\\text{base_period}, \\text{end}]`,
386
+ where :math:`m` is some positive integer.
379
387
 
380
388
  :returns: The output of the
381
389
  :py:meth:`~mlrun.model_monitoring.applications.ModelMonitoringApplicationBase.do_tracking`
@@ -395,16 +403,23 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
395
403
  project=project,
396
404
  )
397
405
 
398
- params: dict[str, Union[list[tuple[str, str]], datetime, int, None]] = {}
406
+ params: dict[str, Union[list[tuple[str, str]], str, int, None]] = {}
399
407
  if endpoints:
400
- start, end = cls._validate_times(start, end, base_period)
401
408
  params["endpoints"] = endpoints
402
- params["start"] = start
403
- params["end"] = end
404
- params["base_period"] = base_period
409
+ if sample_data is None:
410
+ if start is None or end is None:
411
+ raise mlrun.errors.MLRunValueError(
412
+ "`start` and `end` times must be provided when `endpoints` "
413
+ "is provided without `sample_data`"
414
+ )
415
+ params["start"] = (
416
+ start.isoformat() if isinstance(start, datetime) else start
417
+ )
418
+ params["end"] = end.isoformat() if isinstance(end, datetime) else end
419
+ params["base_period"] = base_period
405
420
  elif start or end or base_period:
406
421
  raise mlrun.errors.MLRunValueError(
407
- "Custom start and end times or base_period are supported only with endpoints data"
422
+ "Custom `start` and `end` times or base_period are supported only with endpoints data"
408
423
  )
409
424
 
410
425
  inputs: dict[str, str] = {}
@@ -33,10 +33,10 @@ class _ModelMonitoringApplicationDataRes(ABC):
33
33
  name: str
34
34
 
35
35
  def __post_init__(self):
36
- pat = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
36
+ pat = re.compile(mm_constants.RESULT_NAME_PATTERN)
37
37
  if not re.fullmatch(pat, self.name):
38
38
  raise mlrun.errors.MLRunValueError(
39
- "Attribute name must comply with the regex `[a-zA-Z_][a-zA-Z0-9_]*`"
39
+ f"Attribute name must comply with the regex `{mm_constants.RESULT_NAME_PATTERN}`"
40
40
  )
41
41
 
42
42
  @abstractmethod
@@ -250,9 +250,10 @@ class MonitoringApplicationController:
250
250
 
251
251
  self.model_monitoring_access_key = self._get_model_monitoring_access_key()
252
252
  self.v3io_access_key = mlrun.mlconf.get_v3io_access_key()
253
- self.storage_options = None
254
- if mlrun.mlconf.artifact_path.startswith("s3://"):
255
- self.storage_options = mlrun.mlconf.get_s3_storage_options()
253
+ store, _, _ = mlrun.store_manager.get_or_create_store(
254
+ mlrun.mlconf.artifact_path
255
+ )
256
+ self.storage_options = store.get_storage_options()
256
257
 
257
258
  @staticmethod
258
259
  def _get_model_monitoring_access_key() -> Optional[str]:
@@ -19,6 +19,7 @@ import mlrun.common.schemas.secret
19
19
  import mlrun.datastore.datastore_profile
20
20
  import mlrun.errors
21
21
  import mlrun.model_monitoring.helpers
22
+ from mlrun.datastore.datastore_profile import DatastoreProfile
22
23
 
23
24
  from .base import TSDBConnector
24
25
 
@@ -29,10 +30,13 @@ class ObjectTSDBFactory(enum.Enum):
29
30
  v3io_tsdb = "v3io-tsdb"
30
31
  tdengine = "tdengine"
31
32
 
32
- def to_tsdb_connector(self, project: str, **kwargs) -> TSDBConnector:
33
+ def to_tsdb_connector(
34
+ self, project: str, profile: DatastoreProfile, **kwargs
35
+ ) -> TSDBConnector:
33
36
  """
34
37
  Return a TSDBConnector object based on the provided enum value.
35
38
  :param project: The name of the project.
39
+ :param profile: Datastore profile containing DSN and credentials for TSDB connection
36
40
  :return: `TSDBConnector` object.
37
41
  """
38
42
 
@@ -51,7 +55,7 @@ class ObjectTSDBFactory(enum.Enum):
51
55
 
52
56
  from .tdengine.tdengine_connector import TDEngineConnector
53
57
 
54
- return TDEngineConnector(project=project, **kwargs)
58
+ return TDEngineConnector(project=project, profile=profile, **kwargs)
55
59
 
56
60
  @classmethod
57
61
  def _missing_(cls, value: typing.Any):
@@ -87,12 +91,10 @@ def get_tsdb_connector(
87
91
  kwargs = {}
88
92
  if isinstance(profile, mlrun.datastore.datastore_profile.DatastoreProfileV3io):
89
93
  tsdb_connector_type = mlrun.common.schemas.model_monitoring.TSDBTarget.V3IO_TSDB
90
- kwargs["v3io_access_key"] = profile.v3io_access_key
91
94
  elif isinstance(
92
95
  profile, mlrun.datastore.datastore_profile.TDEngineDatastoreProfile
93
96
  ):
94
97
  tsdb_connector_type = mlrun.common.schemas.model_monitoring.TSDBTarget.TDEngine
95
- kwargs["connection_string"] = profile.dsn()
96
98
  else:
97
99
  extra_message = (
98
100
  ""
@@ -109,4 +111,6 @@ def get_tsdb_connector(
109
111
  tsdb_connector_factory = ObjectTSDBFactory(tsdb_connector_type)
110
112
 
111
113
  # Convert into TSDB connector object
112
- return tsdb_connector_factory.to_tsdb_connector(project=project, **kwargs)
114
+ return tsdb_connector_factory.to_tsdb_connector(
115
+ project=project, profile=profile, **kwargs
116
+ )