mlrun 1.7.1__py3-none-any.whl → 1.7.1rc1__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.

Potentially problematic release: this version of mlrun has been flagged as potentially problematic.

@@ -21,7 +21,6 @@ from .base import ObjectFormat
 
 
 class ArtifactFormat(ObjectFormat, mlrun.common.types.StrEnum):
-    full = "full"
     minimal = "minimal"
 
     @staticmethod
@@ -28,17 +28,6 @@ class FeatureSetFormat(ObjectFormat, mlrun.common.types.StrEnum):
         return {
             FeatureSetFormat.full: None,
             FeatureSetFormat.minimal: FeatureSetFormat.filter_obj_method(
-                [
-                    "metadata.name",
-                    "metadata.project",
-                    "metadata.tag",
-                    "metadata.uid",
-                    "metadata.labels",
-                    "spec.entities",
-                    "spec.description",
-                    "spec.targets",
-                    "spec.engine",  # It's not needed by the UI, but we override it anyway to storey if empty
-                    "status.state",
-                ]
+                ["kind", "metadata", "spec", "status.state"]
             ),
         }[_format]
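
Note: on the rc1 side, the minimal format keeps whole top-level sections ("metadata", "spec") rather than a hand-picked list of leaf fields. A rough sketch of how dotted-path filtering of this kind behaves; keep_fields is a hypothetical stand-in, not mlrun's actual filter_obj_method:

def keep_fields(obj: dict, paths: list[str]) -> dict:
    # Copy only the requested dotted paths; a path naming a section
    # ("metadata") keeps the entire subtree, a path naming a leaf
    # ("status.state") keeps just that value.
    out: dict = {}
    for path in paths:
        src, dst = obj, out
        parts = path.split(".")
        for i, part in enumerate(parts):
            if not isinstance(src, dict) or part not in src:
                break
            if i == len(parts) - 1:
                dst[part] = src[part]
            else:
                src = src[part]
                dst = dst.setdefault(part, {})
    return out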
mlrun/db/httpdb.py CHANGED
@@ -1075,9 +1075,7 @@ class HTTPRunDB(RunDBInterface):
         category: Union[str, mlrun.common.schemas.ArtifactCategories] = None,
         tree: str = None,
         producer_uri: str = None,
-        format_: Optional[
-            mlrun.common.formatters.ArtifactFormat
-        ] = mlrun.common.formatters.ArtifactFormat.full,
+        format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
         limit: int = None,
     ) -> ArtifactList:
         """List artifacts filtered by various parameters.
@@ -23,7 +23,7 @@ import mlrun.model_monitoring.applications.base as mm_base
 import mlrun.model_monitoring.applications.context as mm_context
 from mlrun.errors import MLRunIncompatibleVersionError
 
-SUPPORTED_EVIDENTLY_VERSION = semver.Version.parse("0.4.39")
+SUPPORTED_EVIDENTLY_VERSION = semver.Version.parse("0.4.32")
 
 
 def _check_evidently_version(*, cur: semver.Version, ref: semver.Version) -> None:
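
Note: the supported Evidently pin moves back from 0.4.39 to 0.4.32 (rc1 predates the final release). The body of _check_evidently_version is not part of this diff; the sketch below shows one plausible shape of such a gate using the real semver API, with the tolerance policy being an assumption:

import semver

SUPPORTED_EVIDENTLY_VERSION = semver.Version.parse("0.4.32")

def check_version(cur: semver.Version, ref: semver.Version = SUPPORTED_EVIDENTLY_VERSION) -> None:
    # Assumed policy: same major.minor is acceptable, anything else raises.
    if (cur.major, cur.minor) != (ref.major, ref.minor):
        raise RuntimeError(f"evidently {cur} is incompatible with the supported version {ref}")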
@@ -82,10 +82,9 @@ class TDEngineSchema:
         super_table: str,
         columns: dict[str, _TDEngineColumn],
         tags: dict[str, str],
-        project: str,
         database: Optional[str] = None,
     ):
-        self.super_table = f"{super_table}_{project.replace('-', '_')}"
+        self.super_table = super_table
         self.columns = columns
         self.tags = tags
         self.database = database or _MODEL_MONITORING_DATABASE
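
Note: in 1.7.1 each project got its own supertable (the project name mangled into the table name); on the rc1 side all projects share one supertable per kind. The naming difference, with placeholder values:

super_table, project = "app_results", "my-project"  # placeholders

old_name = f"{super_table}_{project.replace('-', '_')}"  # 1.7.1: "app_results_my_project"
new_name = super_table                                   # 1.7.1rc1: shared "app_results"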
@@ -149,9 +148,6 @@ class TDEngineSchema:
     ) -> str:
         return f"DROP TABLE if EXISTS {self.database}.{subtable};"
 
-    def drop_supertable_query(self) -> str:
-        return f"DROP STABLE if EXISTS {self.database}.{self.super_table};"
-
     def _get_subtables_query(
         self,
         values: dict[str, Union[str, int, float, datetime.datetime]],
@@ -231,7 +227,7 @@ class TDEngineSchema:
 
 @dataclass
 class AppResultTable(TDEngineSchema):
-    def __init__(self, project: str, database: Optional[str] = None):
+    def __init__(self, database: Optional[str] = None):
         super_table = mm_schemas.TDEngineSuperTables.APP_RESULTS
         columns = {
             mm_schemas.WriterEvent.END_INFER_TIME: _TDEngineColumn.TIMESTAMP,
@@ -240,23 +236,18 @@ class AppResultTable(TDEngineSchema):
             mm_schemas.ResultData.RESULT_STATUS: _TDEngineColumn.INT,
         }
         tags = {
+            mm_schemas.EventFieldType.PROJECT: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.ENDPOINT_ID: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.APPLICATION_NAME: _TDEngineColumn.BINARY_64,
             mm_schemas.ResultData.RESULT_NAME: _TDEngineColumn.BINARY_64,
             mm_schemas.ResultData.RESULT_KIND: _TDEngineColumn.INT,
         }
-        super().__init__(
-            super_table=super_table,
-            columns=columns,
-            tags=tags,
-            database=database,
-            project=project,
-        )
+        super().__init__(super_table, columns, tags, database)
 
 
 @dataclass
 class Metrics(TDEngineSchema):
-    def __init__(self, project: str, database: Optional[str] = None):
+    def __init__(self, database: Optional[str] = None):
         super_table = mm_schemas.TDEngineSuperTables.METRICS
         columns = {
             mm_schemas.WriterEvent.END_INFER_TIME: _TDEngineColumn.TIMESTAMP,
@@ -264,22 +255,17 @@ class Metrics(TDEngineSchema):
             mm_schemas.MetricData.METRIC_VALUE: _TDEngineColumn.FLOAT,
         }
         tags = {
+            mm_schemas.EventFieldType.PROJECT: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.ENDPOINT_ID: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.APPLICATION_NAME: _TDEngineColumn.BINARY_64,
             mm_schemas.MetricData.METRIC_NAME: _TDEngineColumn.BINARY_64,
         }
-        super().__init__(
-            super_table=super_table,
-            columns=columns,
-            tags=tags,
-            database=database,
-            project=project,
-        )
+        super().__init__(super_table, columns, tags, database)
 
 
 @dataclass
 class Predictions(TDEngineSchema):
-    def __init__(self, project: str, database: Optional[str] = None):
+    def __init__(self, database: Optional[str] = None):
         super_table = mm_schemas.TDEngineSuperTables.PREDICTIONS
         columns = {
             mm_schemas.EventFieldType.TIME: _TDEngineColumn.TIMESTAMP,
@@ -287,12 +273,7 @@ class Predictions(TDEngineSchema):
             mm_schemas.EventKeyMetrics.CUSTOM_METRICS: _TDEngineColumn.BINARY_10000,
         }
         tags = {
+            mm_schemas.EventFieldType.PROJECT: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.ENDPOINT_ID: _TDEngineColumn.BINARY_64,
         }
-        super().__init__(
-            super_table=super_table,
-            columns=columns,
-            tags=tags,
-            database=database,
-            project=project,
-        )
+        super().__init__(super_table, columns, tags, database)
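
Note: all three schema classes lose the project constructor argument and instead declare a project tag column (BINARY_64), so project separation moves from the table name into TDengine tags. A before/after construction sketch; tdengine_schemas refers to the module as imported in the connector hunks below, and the values are placeholders:

# 1.7.1: the project is baked into the schema (and the supertable name)
table = tdengine_schemas.AppResultTable(project="my-project", database="mm")

# 1.7.1rc1: one shared schema; each written event carries the project as a tag
table = tdengine_schemas.AppResultTable(database="mm")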
@@ -82,13 +82,13 @@ class TDEngineConnector(TSDBConnector):
         """Initialize the super tables for the TSDB."""
         self.tables = {
             mm_schemas.TDEngineSuperTables.APP_RESULTS: tdengine_schemas.AppResultTable(
-                project=self.project, database=self.database
+                self.database
             ),
             mm_schemas.TDEngineSuperTables.METRICS: tdengine_schemas.Metrics(
-                project=self.project, database=self.database
+                self.database
             ),
             mm_schemas.TDEngineSuperTables.PREDICTIONS: tdengine_schemas.Predictions(
-                project=self.project, database=self.database
+                self.database
             ),
         }
 
@@ -112,9 +112,11 @@ class TDEngineConnector(TSDBConnector):
         """
 
         table_name = (
+            f"{self.project}_"
             f"{event[mm_schemas.WriterEvent.ENDPOINT_ID]}_"
-            f"{event[mm_schemas.WriterEvent.APPLICATION_NAME]}"
+            f"{event[mm_schemas.WriterEvent.APPLICATION_NAME]}_"
         )
+        event[mm_schemas.EventFieldType.PROJECT] = self.project
 
         if kind == mm_schemas.WriterEventKind.RESULT:
             # Write a new result
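
Note: subtable names now gain the project as a prefix, and the event is stamped with the project so the value lands in the new project tag. The rc1 table name also ends with a trailing underscore after the application name; the diff does not show what, if anything, is appended after it. With placeholder values:

project, endpoint_id, app = "my-project", "ep123", "my-app"  # placeholders

old_table_name = f"{endpoint_id}_{app}"             # 1.7.1
new_table_name = f"{project}_{endpoint_id}_{app}_"  # 1.7.1rc1 (note the trailing "_")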
@@ -185,9 +187,7 @@ class TDEngineConnector(TSDBConnector):
             name=name,
             after=after,
             url=self._tdengine_connection_string,
-            supertable=self.tables[
-                mm_schemas.TDEngineSuperTables.PREDICTIONS
-            ].super_table,
+            supertable=mm_schemas.TDEngineSuperTables.PREDICTIONS,
             table_col=mm_schemas.EventFieldType.TABLE_COLUMN,
             time_col=mm_schemas.EventFieldType.TIME,
             database=self.database,
@@ -220,23 +220,22 @@ class TDEngineConnector(TSDBConnector):
             "Deleting all project resources using the TDEngine connector",
             project=self.project,
         )
-        drop_statements = []
         for table in self.tables:
-            drop_statements.append(self.tables[table].drop_supertable_query())
-
-        try:
-            self.connection.run(
-                statements=drop_statements,
+            get_subtable_names_query = self.tables[table]._get_subtables_query(
+                values={mm_schemas.EventFieldType.PROJECT: self.project}
+            )
+            subtables = self.connection.run(
+                query=get_subtable_names_query,
                 timeout=self._timeout,
                 retries=self._retries,
-            )
-        except Exception as e:
-            logger.warning(
-                "Failed to drop TDEngine tables. You may need to drop them manually. "
-                "These can be found under the following supertables: app_results, "
-                "metrics, and predictions.",
-                project=self.project,
-                error=mlrun.errors.err_to_str(e),
+            ).data
+            drop_statements = []
+            for subtable in subtables:
+                drop_statements.append(
+                    self.tables[table]._drop_subtable_query(subtable=subtable[0])
+                )
+            self.connection.run(
+                statements=drop_statements, timeout=self._timeout, retries=self._retries
             )
         logger.debug(
             "Deleted all project resources using the TDEngine connector",
@@ -289,6 +288,13 @@ class TDEngineConnector(TSDBConnector):
         :raise: MLRunInvalidArgumentError if query the provided table failed.
         """
 
+        project_condition = f"project = '{self.project}'"
+        filter_query = (
+            f"({filter_query}) AND ({project_condition})"
+            if filter_query
+            else project_condition
+        )
+
         full_query = tdengine_schemas.TDEngineSchema._get_records_query(
             table=table,
             start=start,
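
Note: with shared supertables, every read must be scoped to the project, so the connector now injects a project condition into whatever filter the caller passed. The composition above reduces to:

from typing import Optional

def scope(filter_query: Optional[str], project: str) -> str:
    # Mirrors the hunk above; "my-project" below is a placeholder.
    project_condition = f"project = '{project}'"
    if filter_query:
        return f"({filter_query}) AND ({project_condition})"
    return project_condition

assert scope(None, "my-project") == "project = 'my-project'"
assert scope("result_kind = 0", "my-project") == "(result_kind = 0) AND (project = 'my-project')"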
@@ -340,12 +346,12 @@ class TDEngineConnector(TSDBConnector):
         timestamp_column = mm_schemas.WriterEvent.END_INFER_TIME
         columns = [timestamp_column, mm_schemas.WriterEvent.APPLICATION_NAME]
         if type == "metrics":
-            table = self.tables[mm_schemas.TDEngineSuperTables.METRICS].super_table
+            table = mm_schemas.TDEngineSuperTables.METRICS
             name = mm_schemas.MetricData.METRIC_NAME
             columns += [name, mm_schemas.MetricData.METRIC_VALUE]
             df_handler = self.df_to_metrics_values
         elif type == "results":
-            table = self.tables[mm_schemas.TDEngineSuperTables.APP_RESULTS].super_table
+            table = mm_schemas.TDEngineSuperTables.APP_RESULTS
             name = mm_schemas.ResultData.RESULT_NAME
             columns += [
                 name,
@@ -411,7 +417,7 @@ class TDEngineConnector(TSDBConnector):
                 "both or neither of `aggregation_window` and `agg_funcs` must be provided"
             )
         df = self._get_records(
-            table=self.tables[mm_schemas.TDEngineSuperTables.PREDICTIONS].super_table,
+            table=mm_schemas.TDEngineSuperTables.PREDICTIONS,
             start=start,
             end=end,
             columns=[mm_schemas.EventFieldType.LATENCY],
@@ -104,32 +104,32 @@ class OutputStream:
         self._mock = mock
         self._mock_queue = []
 
-    def create_stream(self):
-        # this import creates an import loop via the utils module, so putting it in execution path
-        from mlrun.utils.helpers import logger
-
-        logger.debug(
-            "Creating output stream",
-            endpoint=self._endpoint,
-            container=self._container,
-            stream_path=self._stream_path,
-            shards=self._shards,
-            retention_in_hours=self._retention_in_hours,
-        )
-        response = self._v3io_client.stream.create(
-            container=self._container,
-            stream_path=self._stream_path,
-            shard_count=self._shards or 1,
-            retention_period_hours=self._retention_in_hours or 24,
-            raise_for_status=v3io.dataplane.RaiseForStatus.never,
-        )
-        if not (response.status_code == 400 and "ResourceInUse" in str(response.body)):
-            response.raise_for_status([409, 204])
-
     def _lazy_init(self):
         if self._create and not self._mock:
+            # this import creates an import loop via the utils module, so putting it in execution path
+            from mlrun.utils.helpers import logger
+
             self._create = False
-            self.create_stream()
+
+            logger.debug(
+                "Creating output stream",
+                endpoint=self._endpoint,
+                container=self._container,
+                stream_path=self._stream_path,
+                shards=self._shards,
+                retention_in_hours=self._retention_in_hours,
+            )
+            response = self._v3io_client.stream.create(
+                container=self._container,
+                stream_path=self._stream_path,
+                shard_count=self._shards or 1,
+                retention_period_hours=self._retention_in_hours or 24,
+                raise_for_status=v3io.dataplane.RaiseForStatus.never,
+            )
+            if not (
+                response.status_code == 400 and "ResourceInUse" in str(response.body)
+            ):
+                response.raise_for_status([409, 204])
 
     def push(self, data, partition_key=None):
         self._lazy_init()
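
Note: rc1 has no public create_stream method; the creation logic lives inline in _lazy_init, which runs on the first push. Reduced to its skeleton (names mirror the hunk, bodies elided):

class OutputStream:
    def _lazy_init(self):
        if self._create and not self._mock:
            self._create = False  # flipped before creating, so a failed create is not retried
            ...                   # create the v3io stream (full body in the hunk above)

    def push(self, data, partition_key=None):
        self._lazy_init()
        ...                       # write the records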
@@ -27,12 +27,7 @@ __all__ = [
 ]
 
 from .operations import build_function, deploy_function, run_function  # noqa
-from .pipelines import (
-    import_remote_project,
-    load_and_run_workflow,
-    load_and_run,
-    pipeline_context,
-)  # noqa
+from .pipelines import load_and_run, pipeline_context  # noqa
 from .project import (
     MlrunProject,
     ProjectMetadata,
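
Note: the rc1 side re-exports only load_and_run and pipeline_context; import_remote_project and load_and_run_workflow exist only in 1.7.1. Code that must run against both versions could resolve the runner defensively (a hedged sketch):

from mlrun.projects import pipelines

# Prefer the 1.7.1 name, fall back to the rc1-era combined function.
run_workflow = getattr(pipelines, "load_and_run_workflow", pipelines.load_and_run)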
@@ -984,25 +984,14 @@ def github_webhook(request):
     return {"msg": "pushed"}
 
 
-def load_and_run(context, *args, **kwargs):
-    """
-    This function serves as an alias to `load_and_run_workflow`,
-    allowing to continue using `load_and_run` without modifying existing workflows or exported runs.
-    This approach ensures backward compatibility,
-    while directing all new calls to the updated `load_and_run_workflow` function.
-    """
-    kwargs.pop("load_only", None)
-    kwargs.pop("save", None)
-    load_and_run_workflow(context, *args, **kwargs)
-
-
-def load_and_run_workflow(
+def load_and_run(
     context: mlrun.execution.MLClientCtx,
     url: str = None,
     project_name: str = "",
     init_git: bool = None,
     subpath: str = None,
     clone: bool = False,
+    save: bool = True,
     workflow_name: str = None,
     workflow_path: str = None,
     workflow_arguments: dict[str, typing.Any] = None,
@@ -1015,12 +1004,14 @@ def load_and_run_workflow(
     local: bool = None,
     schedule: typing.Union[str, mlrun.common.schemas.ScheduleCronTrigger] = None,
     cleanup_ttl: int = None,
+    load_only: bool = False,
     wait_for_completion: bool = False,
     project_context: str = None,
 ):
     """
     Auxiliary function that the RemoteRunner run once or run every schedule.
     This function loads a project from a given remote source and then runs the workflow.
+
     :param context: mlrun context.
     :param url: remote url that represents the project's source.
                 See 'mlrun.load_project()' for details
@@ -1028,6 +1019,7 @@ def load_and_run_workflow(
     :param init_git: if True, will git init the context dir
     :param subpath: project subpath (within the archive)
     :param clone: if True, always clone (delete any existing content)
+    :param save: whether to save the created project and artifact in the DB
     :param workflow_name: name of the workflow
     :param workflow_path: url to a workflow file, if not a project workflow
     :param workflow_arguments: kubeflow pipelines arguments (parameters)
@@ -1043,31 +1035,48 @@ def load_and_run_workflow(
     :param schedule: ScheduleCronTrigger class instance or a standard crontab expression string
     :param cleanup_ttl: pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the
                         workflow and all its resources are deleted)
+    :param load_only: for just loading the project, inner use.
     :param wait_for_completion: wait for workflow completion before returning
     :param project_context: project context path (used for loading the project)
     """
-    project_context = project_context or f"./{project_name}"
-
-    # Load the project to fetch files which the runner needs, such as remote source files
-    pull_remote_project_files(
-        context=context,
-        project_context=project_context,
-        url=url,
-        project_name=project_name,
-        init_git=init_git,
-        subpath=subpath,
-        clone=clone,
-        schedule=schedule,
-        workflow_name=workflow_name,
-    )
+    try:
+        project = mlrun.load_project(
+            context=project_context or f"./{project_name}",
+            url=url,
+            name=project_name,
+            init_git=init_git,
+            subpath=subpath,
+            clone=clone,
+            save=save,
+            sync_functions=True,
+        )
+    except Exception as error:
+        if schedule:
+            notification_pusher = mlrun.utils.notifications.CustomNotificationPusher(
+                ["slack"]
+            )
+            url = get_ui_url(project_name, context.uid)
+            link = f"<{url}|*view workflow job details*>"
+            message = (
+                f":x: Failed to run scheduled workflow {workflow_name} in Project {project_name} !\n"
+                f"error: ```{error}```\n{link}"
+            )
+            # Sending Slack Notification without losing the original error:
+            try:
+                notification_pusher.push(
+                    message=message,
+                    severity=mlrun.common.schemas.NotificationSeverity.ERROR,
+                )
 
-    # Retrieve the project object:
-    # - If the project exists in the MLRun database, it will be loaded from there.
-    # - If it doesn't exist in the database, it will be created from the previously loaded local directory.
-    project = mlrun.get_or_create_project(
-        context=project_context or f"./{project_name}",
-        name=project_name,
-    )
+            except Exception as exc:
+                logger.error("Failed to send slack notification", exc=err_to_str(exc))
+
+            raise error
+
+    context.logger.info(f"Loaded project {project.name} successfully")
+
+    if load_only:
+        return
 
     # extract "start" notification if exists
     start_notifications = [
@@ -1100,156 +1109,18 @@ def load_and_run_workflow(
         raise RuntimeError(f"Workflow {workflow_log_message} failed") from run.exc
 
     if wait_for_completion:
-        handle_workflow_completion(
-            run=run,
-            project=project,
-            context=context,
-            workflow_log_message=workflow_log_message,
-        )
-
-
-def pull_remote_project_files(
-    context: mlrun.execution.MLClientCtx,
-    project_context: str,
-    url: str,
-    project_name: str,
-    init_git: typing.Optional[bool],
-    subpath: typing.Optional[str],
-    clone: bool,
-    schedule: typing.Optional[
-        typing.Union[str, mlrun.common.schemas.ScheduleCronTrigger]
-    ],
-    workflow_name: typing.Optional[str],
-) -> None:
-    """
-    Load the project to clone remote files if they exist.
-    If an exception occurs during project loading, send a notification if the workflow is scheduled.
-
-    :param context: MLRun execution context.
-    :param project_context: Path to the project context.
-    :param url: URL of the project repository.
-    :param project_name: Name of the project.
-    :param init_git: Initialize a git repository.
-    :param subpath: Project subpath within the repository.
-    :param clone: Whether to clone the repository.
-    :param schedule: Schedule for running the workflow.
-    :param workflow_name: Name of the workflow to run.
-    """
-    try:
-        # Load the project to clone remote files if they exist.
-        # Using save=False to avoid overriding changes from the database if it already exists.
-        mlrun.load_project(
-            context=project_context,
-            url=url,
-            name=project_name,
-            init_git=init_git,
-            subpath=subpath,
-            clone=clone,
-            save=False,
-        )
-    except Exception as error:
-        notify_scheduled_workflow_failure(
-            schedule=schedule,
-            project_name=project_name,
-            workflow_name=workflow_name,
-            error=error,
-            context_uid=context.uid,
-        )
-        raise error
-
-
-def notify_scheduled_workflow_failure(
-    schedule,
-    project_name: str,
-    workflow_name: str,
-    error: Exception,
-    context_uid: str,
-) -> None:
-    if schedule:
-        notification_pusher = mlrun.utils.notifications.CustomNotificationPusher(
-            ["slack"]
-        )
-        url = get_ui_url(project_name, context_uid)
-        link = f"<{url}|*view workflow job details*>"
-        message = (
-            f":x: Failed to run scheduled workflow {workflow_name} "
-            f"in Project {project_name}!\n"
-            f"Error: ```{err_to_str(error)}```\n{link}"
-        )
-        # Sending Slack Notification without losing the original error:
         try:
-            notification_pusher.push(
-                message=message,
-                severity=mlrun.common.schemas.NotificationSeverity.ERROR,
-            )
-
+            run.wait_for_completion()
         except Exception as exc:
-            logger.error("Failed to send slack notification", exc=err_to_str(exc))
-
-
-def handle_workflow_completion(
-    run: _PipelineRunStatus,
-    project,
-    context: mlrun.execution.MLClientCtx,
-    workflow_log_message: str,
-) -> None:
-    """
-    Handle workflow completion by waiting for it to finish and logging the final state.
-
-    :param run: Run object containing workflow execution details.
-    :param project: MLRun project object.
-    :param context: MLRun execution context.
-    :param workflow_log_message: Message used for logging.
-    """
-    try:
-        run.wait_for_completion()
-    except Exception as exc:
-        mlrun.utils.logger.error(
-            "Failed waiting for workflow completion",
-            workflow=workflow_log_message,
-            exc=err_to_str(exc),
-        )
-
-    pipeline_state, _, _ = project.get_run_status(run)
-    context.log_result(key="workflow_state", value=pipeline_state, commit=True)
-    if pipeline_state != mlrun_pipelines.common.models.RunStatuses.succeeded:
-        raise RuntimeError(
-            f"Workflow {workflow_log_message} failed, state={pipeline_state}"
-        )
-
-
-def import_remote_project(
-    context: mlrun.execution.MLClientCtx,
-    url: str = None,
-    project_name: str = "",
-    init_git: bool = None,
-    subpath: str = None,
-    clone: bool = False,
-    save: bool = True,
-    project_context: str = None,
-):
-    """
-    This function loads a project from a given remote source.
-
-    :param context: mlrun context.
-    :param url: remote url that represents the project's source.
-                See 'mlrun.load_project()' for details
-    :param project_name: project name
-    :param init_git: if True, will git init the context dir
-    :param subpath: project subpath (within the archive)
-    :param clone: if True, always clone (delete any existing content)
-    :param save: whether to save the created project and artifact in the DB
-    :param project_context: project context path (used for loading the project)
-    """
-    project = mlrun.load_project(
-        context=project_context or f"./{project_name}",
-        url=url,
-        name=project_name,
-        init_git=init_git,
-        subpath=subpath,
-        clone=clone,
-        save=save,
-        sync_functions=True,
-    )
+            logger.error(
+                "Failed waiting for workflow completion",
+                workflow=workflow_log_message,
+                exc=err_to_str(exc),
+            )
 
-    context.logger.info(f"Loaded project {project.name} successfully")
+    pipeline_state, _, _ = project.get_run_status(run)
+    context.log_result(key="workflow_state", value=pipeline_state, commit=True)
+    if pipeline_state != mlrun_pipelines.common.models.RunStatuses.succeeded:
+        raise RuntimeError(
+            f"Workflow {workflow_log_message} failed, state={pipeline_state}"
+        )
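
Note: on the rc1 side, loading and running are a single function again: save=True persists the loaded project in the DB and load_only=True returns right after loading. A hedged invocation sketch, as it might be wired into an MLRun job handler (the URL and names are placeholders):

def handler(context):  # an MLRun job handler receives an MLClientCtx
    load_and_run(
        context,
        url="git://github.com/org/repo.git#main",  # placeholder source
        project_name="my-project",
        save=True,               # persist the loaded project
        load_only=False,         # proceed to run the workflow
        workflow_name="main",
        wait_for_completion=True,
    )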