mlrun 1.6.0rc11__py3-none-any.whl → 1.6.0rc13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic. Click here for more details.

mlrun/projects/project.py CHANGED
@@ -1931,7 +1931,7 @@ class MlrunProject(ModelObj):
1931
1931
 
1932
1932
  return resolved_function_name, function_object, func
1933
1933
 
1934
- def enable_model_monitoring_controller(
1934
+ def enable_model_monitoring(
1935
1935
  self,
1936
1936
  default_controller_image: str = "mlrun/mlrun",
1937
1937
  base_period: int = 10,
@@ -1944,8 +1944,9 @@ class MlrunProject(ModelObj):
1944
1944
  :param default_controller_image: The default image of the model monitoring controller job. Note that the writer
1945
1945
  function, which is a real-time nuclio function, will be deployed with the same
1946
1946
  image. By default, the image is mlrun/mlrun.
1947
- :param base_period: Minutes to determine the frequency in which the model monitoring controller job
1948
- is running. By default, the base period is 5 minutes.
1947
+ :param base_period: The time period in minutes in which the model monitoring controller job
1948
+ runs. By default, the base period is 10 minutes. The schedule for the job
1949
+ will be the following cron expression: "*/{base_period} * * * *".
1949
1950
  :return: model monitoring controller job as a dictionary.
1950
1951
  """
1951
1952
  db = mlrun.db.get_run_db(secrets=self._secrets)
@@ -1955,7 +1956,7 @@ class MlrunProject(ModelObj):
1955
1956
  base_period=base_period,
1956
1957
  )
1957
1958
 
1958
- def disable_model_monitoring_controller(self):
1959
+ def disable_model_monitoring(self):
1959
1960
  db = mlrun.db.get_run_db(secrets=self._secrets)
1960
1961
  db.delete_function(
1961
1962
  project=self.name,
@@ -2981,6 +2982,12 @@ class MlrunProject(ModelObj):
2981
2982
  :param extra_args: A string containing additional builder arguments in the format of command-line options,
2982
2983
  e.g. extra_args="--skip-tls-verify --build-arg A=val"
2983
2984
  """
2985
+ if not overwrite_build_params:
2986
+ # TODO: change overwrite_build_params default to True in 1.8.0
2987
+ warnings.warn(
2988
+ "The `overwrite_build_params` parameter default will change from 'False' to 'True in 1.8.0.",
2989
+ mlrun.utils.OverwriteBuildParamsWarning,
2990
+ )
2984
2991
  default_image_name = mlrun.mlconf.default_project_image_name.format(
2985
2992
  name=self.name
2986
2993
  )
@@ -3052,35 +3059,48 @@ class MlrunProject(ModelObj):
3052
3059
  FutureWarning,
3053
3060
  )
3054
3061
 
3055
- self.build_config(
3056
- image=image,
3057
- set_as_default=set_as_default,
3058
- base_image=base_image,
3059
- commands=commands,
3060
- secret_name=secret_name,
3061
- with_mlrun=with_mlrun,
3062
- requirements=requirements,
3063
- requirements_file=requirements_file,
3064
- overwrite_build_params=overwrite_build_params,
3065
- )
3062
+ if not overwrite_build_params:
3063
+ # TODO: change overwrite_build_params default to True in 1.8.0
3064
+ warnings.warn(
3065
+ "The `overwrite_build_params` parameter default will change from 'False' to 'True in 1.8.0.",
3066
+ mlrun.utils.OverwriteBuildParamsWarning,
3067
+ )
3066
3068
 
3067
- function = mlrun.new_function("mlrun--project--image--builder", kind="job")
3068
-
3069
- build = self.spec.build
3070
- result = self.build_function(
3071
- function=function,
3072
- with_mlrun=build.with_mlrun,
3073
- image=build.image,
3074
- base_image=build.base_image,
3075
- commands=build.commands,
3076
- secret_name=build.secret,
3077
- requirements=build.requirements,
3078
- overwrite_build_params=overwrite_build_params,
3079
- mlrun_version_specifier=mlrun_version_specifier,
3080
- builder_env=builder_env,
3081
- extra_args=extra_args,
3082
- force_build=force_build,
3083
- )
3069
+ # TODO: remove filter once overwrite_build_params default is changed to True in 1.8.0
3070
+ with warnings.catch_warnings():
3071
+ warnings.simplefilter(
3072
+ "ignore", category=mlrun.utils.OverwriteBuildParamsWarning
3073
+ )
3074
+
3075
+ self.build_config(
3076
+ image=image,
3077
+ set_as_default=set_as_default,
3078
+ base_image=base_image,
3079
+ commands=commands,
3080
+ secret_name=secret_name,
3081
+ with_mlrun=with_mlrun,
3082
+ requirements=requirements,
3083
+ requirements_file=requirements_file,
3084
+ overwrite_build_params=overwrite_build_params,
3085
+ )
3086
+
3087
+ function = mlrun.new_function("mlrun--project--image--builder", kind="job")
3088
+
3089
+ build = self.spec.build
3090
+ result = self.build_function(
3091
+ function=function,
3092
+ with_mlrun=build.with_mlrun,
3093
+ image=build.image,
3094
+ base_image=build.base_image,
3095
+ commands=build.commands,
3096
+ secret_name=build.secret,
3097
+ requirements=build.requirements,
3098
+ overwrite_build_params=overwrite_build_params,
3099
+ mlrun_version_specifier=mlrun_version_specifier,
3100
+ builder_env=builder_env,
3101
+ extra_args=extra_args,
3102
+ force_build=force_build,
3103
+ )
3084
3104
 
3085
3105
  try:
3086
3106
  mlrun.db.get_run_db(secrets=self._secrets).delete_function(
@@ -13,6 +13,7 @@
13
13
  # limitations under the License.
14
14
 
15
15
  import os
16
+ from ast import FunctionDef, parse, unparse
16
17
  from base64 import b64decode, b64encode
17
18
  from typing import Callable, Dict, List, Optional, Union
18
19
 
@@ -22,61 +23,92 @@ from mlrun.model import HyperParamOptions, RunObject
22
23
  from mlrun.runtimes.kubejob import KubejobRuntime
23
24
 
24
25
 
26
+ def get_log_artifacts_code(runobj: RunObject, task_parameters: dict):
27
+ artifact_json_dir = task_parameters.get(
28
+ "artifact_json_dir",
29
+ mlrun.mlconf.function.databricks.artifact_directory_path,
30
+ )
31
+ artifact_json_path = (
32
+ f"{artifact_json_dir}/mlrun_artifact_{runobj.metadata.uid}.json"
33
+ )
34
+ return (
35
+ log_artifacts_code_template.format(f"/dbfs{artifact_json_path}"),
36
+ artifact_json_path,
37
+ )
38
+
39
+
40
+ def replace_log_artifact_function(code: str, log_artifacts_code: str):
41
+ # user can use a dummy function in order to avoid editing their code.
42
+ # replace the mlrun_log_artifact function if it already exists.
43
+ is_replaced = False
44
+ parsed_code = parse(code)
45
+ for node in parsed_code.body:
46
+ if isinstance(node, FunctionDef) and node.name == "mlrun_log_artifact":
47
+ new_function_ast = parse(log_artifacts_code)
48
+ node.args = new_function_ast.body[0].args
49
+ node.body = new_function_ast.body[0].body
50
+ is_replaced = True
51
+ break
52
+ return unparse(parsed_code), is_replaced
53
+
54
+
25
55
  class DatabricksRuntime(KubejobRuntime):
26
56
  kind = "databricks"
27
57
  _is_remote = True
28
58
 
29
- def _get_log_artifacts_code(self, runobj: RunObject, task_parameters: dict):
30
- artifact_json_dir = task_parameters.get(
31
- "artifact_json_dir",
32
- mlrun.mlconf.function.databricks.artifact_directory_path,
33
- )
34
- artifact_json_path = (
35
- f"{artifact_json_dir}/mlrun_artifact_{runobj.metadata.uid}.json"
59
+ @staticmethod
60
+ def _verify_returns(returns):
61
+ # TODO complete returns feature
62
+ if returns:
63
+ raise MLRunInvalidArgumentError(
64
+ "Databricks function does not support returns."
65
+ )
66
+
67
+ def _get_modified_user_code(self, original_handler: str, log_artifacts_code: str):
68
+ encoded_code = (
69
+ self.spec.build.functionSourceCode if hasattr(self.spec, "build") else None
36
70
  )
37
- return (
38
- artifacts_code_template.format(f"/dbfs{artifact_json_path}"),
39
- artifact_json_path,
71
+ if not encoded_code:
72
+ raise ValueError("Databricks function must be provided with user code")
73
+
74
+ decoded_code = b64decode(encoded_code).decode("utf-8")
75
+ decoded_code, is_replaced = replace_log_artifact_function(
76
+ code=decoded_code, log_artifacts_code=log_artifacts_code
40
77
  )
78
+ if is_replaced:
79
+ decoded_code = (
80
+ logger_and_consts_code + _databricks_script_code + decoded_code
81
+ )
82
+ else:
83
+ decoded_code = (
84
+ logger_and_consts_code
85
+ + log_artifacts_code
86
+ + _databricks_script_code
87
+ + decoded_code
88
+ )
89
+ if original_handler:
90
+ decoded_code += f"\nresult = {original_handler}(**handler_arguments)\n"
91
+ decoded_code += _return_artifacts_code
92
+ return b64encode(decoded_code.encode("utf-8")).decode("utf-8")
41
93
 
42
94
  def get_internal_parameters(self, runobj: RunObject):
43
95
  """
44
- Return the internal function code.
96
+ Return the internal function parameters + code.
45
97
  """
46
98
  task_parameters = runobj.spec.parameters.get("task_parameters", {})
47
99
  if "original_handler" in task_parameters:
48
100
  original_handler = task_parameters["original_handler"]
49
101
  else:
50
102
  original_handler = runobj.spec.handler or ""
51
- encoded_code = (
52
- self.spec.build.functionSourceCode if hasattr(self.spec, "build") else None
53
- )
54
- if not encoded_code:
55
- raise ValueError("Databricks function must be provided with user code")
56
- decoded_code = b64decode(encoded_code).decode("utf-8")
57
- artifacts_code, artifact_json_path = self._get_log_artifacts_code(
103
+ log_artifacts_code, artifact_json_path = get_log_artifacts_code(
58
104
  runobj=runobj, task_parameters=task_parameters
59
105
  )
60
- code = artifacts_code + _databricks_script_code + decoded_code
61
- if original_handler:
62
- code += f"\nresult = {original_handler}(**handler_arguments)\n"
63
- code += """\n
64
- default_key_template = 'mlrun_return_value_'
65
- if result:
66
- if isinstance(result, dict):
67
- for key, path in result.items():
68
- mlrun_log_artifact(name=key, path=path)
69
- elif isinstance(result, (list, tuple, set)):
70
- for index, value in enumerate(result):
71
- key = f'{default_key_template}{index+1}'
72
- mlrun_log_artifact(name=key, path=value)
73
- elif isinstance(result, str):
74
- mlrun_log_artifact(name=f'{default_key_template}1', path=result)
75
- else:
76
- mlrun_logger.warning(f'cannot log artifacts with the result of handler function \
77
- - result in unsupported type. {type(result)}')
78
- """
79
- code = b64encode(code.encode("utf-8")).decode("utf-8")
106
+ returns = runobj.spec.returns or []
107
+ self._verify_returns(returns=returns)
108
+ code = self._get_modified_user_code(
109
+ original_handler=original_handler,
110
+ log_artifacts_code=log_artifacts_code,
111
+ )
80
112
  updated_task_parameters = {
81
113
  "original_handler": original_handler,
82
114
  "artifact_json_path": artifact_json_path,
@@ -159,6 +191,22 @@ if result:
159
191
  )
160
192
 
161
193
 
194
+ logger_and_consts_code = """ \n
195
+ import os
196
+ import logging
197
+ mlrun_logger = logging.getLogger('mlrun_logger')
198
+ mlrun_logger.setLevel(logging.DEBUG)
199
+
200
+ mlrun_console_handler = logging.StreamHandler()
201
+ mlrun_console_handler.setLevel(logging.DEBUG)
202
+ mlrun_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
203
+ mlrun_console_handler.setFormatter(mlrun_formatter)
204
+ mlrun_logger.addHandler(mlrun_console_handler)
205
+
206
+ mlrun_default_artifact_template = 'mlrun_return_value_'
207
+ mlrun_artifact_index = 0
208
+ """
209
+
162
210
  _databricks_script_code = """
163
211
 
164
212
  import argparse
@@ -168,16 +216,17 @@ parser.add_argument('handler_arguments')
168
216
  handler_arguments = parser.parse_args().handler_arguments
169
217
  handler_arguments = json.loads(handler_arguments)
170
218
 
171
- """
172
219
 
173
- artifacts_code_template = """\n
174
- import logging
175
- mlrun_logger = logging.getLogger('mlrun_logger')
176
- mlrun_logger.setLevel(logging.DEBUG)
220
+ """
177
221
 
178
- def mlrun_log_artifact(name, path):
179
- if not name or not path:
180
- mlrun_logger.error(f'name and path required for logging an mlrun artifact - {{name}} : {{path}}')
222
+ log_artifacts_code_template = """\n
223
+ def mlrun_log_artifact(name='', path=''):
224
+ global mlrun_artifact_index
225
+ mlrun_artifact_index+=1 # by how many artifacts we tried to log, not how many succeed.
226
+ if name is None or name == '':
227
+ name = f'{{mlrun_default_artifact_template}}{{mlrun_artifact_index}}'
228
+ if not path:
229
+ mlrun_logger.error(f'path required for logging an mlrun artifact - {{name}} : {{path}}')
181
230
  return
182
231
  if not isinstance(name, str) or not isinstance(path, str):
183
232
  mlrun_logger.error(f'name and path must be in string type for logging an mlrun artifact - {{name}} : {{path}}')
@@ -186,21 +235,38 @@ def mlrun_log_artifact(name, path):
186
235
  mlrun_logger.error(f'path for an mlrun artifact must start with /dbfs or dbfs:/ - {{name}} : {{path}}')
187
236
  return
188
237
  mlrun_artifacts_path = '{}'
189
- import json
190
- import os
191
- new_data = {{name:path}}
192
- if os.path.exists(mlrun_artifacts_path):
193
- with open(mlrun_artifacts_path, 'r+') as json_file:
194
- existing_data = json.load(json_file)
195
- existing_data.update(new_data)
196
- json_file.seek(0)
197
- json.dump(existing_data, json_file)
198
- else:
199
- parent_dir = os.path.dirname(mlrun_artifacts_path)
200
- if parent_dir != '/dbfs':
201
- os.makedirs(parent_dir, exist_ok=True)
202
- with open(mlrun_artifacts_path, 'w') as json_file:
203
- json.dump(new_data, json_file)
204
- mlrun_logger.info(f'successfully wrote artifact details to the artifact JSON file in DBFS - {{name}} : {{path}}')
238
+ try:
239
+ new_data = {{name:path}}
240
+ if os.path.exists(mlrun_artifacts_path):
241
+ with open(mlrun_artifacts_path, 'r+') as json_file:
242
+ existing_data = json.load(json_file)
243
+ existing_data.update(new_data)
244
+ json_file.seek(0)
245
+ json.dump(existing_data, json_file)
246
+ else:
247
+ parent_dir = os.path.dirname(mlrun_artifacts_path)
248
+ if parent_dir != '/dbfs':
249
+ os.makedirs(parent_dir, exist_ok=True)
250
+ with open(mlrun_artifacts_path, 'w') as json_file:
251
+ json.dump(new_data, json_file)
252
+ success_log = f'successfully wrote artifact details to the artifact JSON file in DBFS - {{name}} : {{path}}'
253
+ mlrun_logger.info(success_log)
254
+ except Exception as unknown_exception:
255
+ mlrun_logger.error(f'log mlrun artifact failed - {{name}} : {{path}}. error: {{unknown_exception}}')
205
256
  \n
206
257
  """
258
+
259
+ _return_artifacts_code = """\n
260
+ if result:
261
+ if isinstance(result, dict):
262
+ for key, path in result.items():
263
+ mlrun_log_artifact(name=key, path=path)
264
+ elif isinstance(result, (list, tuple, set)):
265
+ for artifact_path in result:
266
+ mlrun_log_artifact(path=artifact_path)
267
+ elif isinstance(result, str):
268
+ mlrun_log_artifact(path=result)
269
+ else:
270
+ mlrun_logger.warning(f'can not log artifacts with the result of handler function \
271
+ - result in unsupported type. {type(result)}')
272
+ """
@@ -200,7 +200,6 @@ def run_mlrun_databricks_job(
200
200
  is_finished=True,
201
201
  )
202
202
  run_output = workspace.jobs.get_run_output(get_task(run).run_id)
203
- context.log_result("databricks_runtime_task", run_output.as_dict())
204
203
  finally:
205
204
  workspace.dbfs.delete(script_path_on_dbfs)
206
205
  workspace.dbfs.delete(artifact_json_path)
@@ -550,6 +550,13 @@ class RemoteRuntime(KubeResource):
550
550
  """
551
551
  # todo: verify that the function name is normalized
552
552
 
553
+ old_http_session = getattr(self, "_http_session", None)
554
+ if old_http_session:
555
+ # ensure existing http session is terminated prior to (re)deploy to ensure that a connection to an old
556
+ # replica will not be reused
557
+ old_http_session.close()
558
+ self._http_session = None
559
+
553
560
  verbose = verbose or self.verbose
554
561
  if verbose:
555
562
  self.set_env("MLRUN_LOG_LEVEL", "DEBUG")
@@ -939,7 +946,7 @@ class RemoteRuntime(KubeResource):
939
946
  http_client_kwargs["json"] = body
940
947
  try:
941
948
  logger.info("invoking function", method=method, path=path)
942
- if not hasattr(self, "_http_session"):
949
+ if not getattr(self, "_http_session", None):
943
950
  self._http_session = requests.Session()
944
951
  resp = self._http_session.request(
945
952
  method, path, headers=headers, **http_client_kwargs
mlrun/runtimes/kubejob.py CHANGED
@@ -13,6 +13,7 @@
13
13
  # limitations under the License.
14
14
 
15
15
  import time
16
+ import warnings
16
17
 
17
18
  import mlrun.common.schemas
18
19
  import mlrun.db
@@ -130,7 +131,12 @@ class KubejobRuntime(KubeResource):
130
131
  :param builder_env: Kaniko builder pod env vars dict (for config/credentials)
131
132
  e.g. builder_env={"GIT_TOKEN": token}
132
133
  """
133
-
134
+ if not overwrite:
135
+ # TODO: change overwrite default to True in 1.8.0
136
+ warnings.warn(
137
+ "The `overwrite` parameter default will change from 'False' to 'True in 1.8.0.",
138
+ mlrun.utils.OverwriteBuildParamsWarning,
139
+ )
134
140
  image = mlrun.utils.helpers.remove_image_protocol_prefix(image)
135
141
  self.spec.build.build_config(
136
142
  image=image,
mlrun/serving/server.py CHANGED
@@ -357,7 +357,7 @@ def v2_serving_init(context, namespace=None):
357
357
  if hasattr(context, "platform") and hasattr(
358
358
  context.platform, "set_termination_callback"
359
359
  ):
360
- context.logger.debug(
360
+ context.logger.info(
361
361
  "Setting termination callback to terminate graph on worker shutdown"
362
362
  )
363
363
 
@@ -368,6 +368,23 @@ def v2_serving_init(context, namespace=None):
368
368
 
369
369
  context.platform.set_termination_callback(termination_callback)
370
370
 
371
+ if hasattr(context, "platform") and hasattr(context.platform, "set_drain_callback"):
372
+ context.logger.info(
373
+ "Setting drain callback to terminate and restart the graph on a drain event (such as rebalancing)"
374
+ )
375
+
376
+ def drain_callback():
377
+ context.logger.info("Drain callback called")
378
+ server.wait_for_completion()
379
+ context.logger.info(
380
+ "Termination of async flow is completed. Rerunning async flow."
381
+ )
382
+ # Rerun the flow without reconstructing it
383
+ server.graph._run_async_flow()
384
+ context.logger.info("Async flow restarted")
385
+
386
+ context.platform.set_drain_callback(drain_callback)
387
+
371
388
 
372
389
  def v2_serving_handler(context, event, get_body=False):
373
390
  """hook for nuclio handler()"""
mlrun/serving/states.py CHANGED
@@ -921,6 +921,7 @@ class FlowStep(BaseStep):
921
921
 
922
922
  if self.engine != "sync":
923
923
  self._build_async_flow()
924
+ self._run_async_flow()
924
925
 
925
926
  def check_and_process_graph(self, allow_empty=False):
926
927
  """validate correct graph layout and initialize the .next links"""
@@ -1075,7 +1076,10 @@ class FlowStep(BaseStep):
1075
1076
  if next_state.async_object and error_step.async_object:
1076
1077
  error_step.async_object.to(next_state.async_object)
1077
1078
 
1078
- self._controller = source.run()
1079
+ self._async_flow = source
1080
+
1081
+ def _run_async_flow(self):
1082
+ self._controller = self._async_flow.run()
1079
1083
 
1080
1084
  def get_queue_links(self):
1081
1085
  """return dict of function and queue its listening on, for building stream triggers"""
mlrun/utils/helpers.py CHANGED
@@ -65,6 +65,10 @@ DEFAULT_TIME_PARTITIONS = ["year", "month", "day", "hour"]
65
65
  DEFAULT_TIME_PARTITIONING_GRANULARITY = "hour"
66
66
 
67
67
 
68
+ class OverwriteBuildParamsWarning(FutureWarning):
69
+ pass
70
+
71
+
68
72
  # TODO: remove in 1.7.0
69
73
  @deprecated(
70
74
  version="1.5.0",
@@ -183,7 +187,7 @@ def verify_field_regex(
183
187
  if mode == mlrun.common.schemas.RegexMatchModes.all:
184
188
  if raise_on_failure:
185
189
  raise mlrun.errors.MLRunInvalidArgumentError(
186
- f"Field '{field_name}' is malformed. {field_value} does not match required pattern: {pattern}"
190
+ f"Field '{field_name}' is malformed. '{field_value}' does not match required pattern: {pattern}"
187
191
  )
188
192
  return False
189
193
  elif mode == mlrun.common.schemas.RegexMatchModes.any:
@@ -193,7 +197,7 @@ def verify_field_regex(
193
197
  elif mode == mlrun.common.schemas.RegexMatchModes.any:
194
198
  if raise_on_failure:
195
199
  raise mlrun.errors.MLRunInvalidArgumentError(
196
- f"Field '{field_name}' is malformed. {field_value} does not match any of the"
200
+ f"Field '{field_name}' is malformed. '{field_value}' does not match any of the"
197
201
  f" required patterns: {patterns}"
198
202
  )
199
203
  return False
@@ -333,7 +337,7 @@ def remove_image_protocol_prefix(image: str) -> str:
333
337
  def verify_field_of_type(field_name: str, field_value, expected_type: type):
334
338
  if not isinstance(field_value, expected_type):
335
339
  raise mlrun.errors.MLRunInvalidArgumentError(
336
- f"Field '{field_name}' should be of type {expected_type.__name__} "
340
+ f"Field '{field_name}' should be of type '{expected_type.__name__}' "
337
341
  f"(got: {type(field_value).__name__} with value: {field_value})."
338
342
  )
339
343
 
@@ -357,14 +361,14 @@ def verify_dict_items_type(
357
361
  if dictionary:
358
362
  if type(dictionary) != dict:
359
363
  raise mlrun.errors.MLRunInvalidArgumentTypeError(
360
- f"{name} expected to be of type dict, got type : {type(dictionary)}"
364
+ f"'{name}' expected to be of type dict, got type: {type(dictionary)}"
361
365
  )
362
366
  try:
363
367
  verify_list_items_type(dictionary.keys(), expected_keys_types)
364
368
  verify_list_items_type(dictionary.values(), expected_values_types)
365
369
  except mlrun.errors.MLRunInvalidArgumentTypeError as exc:
366
370
  raise mlrun.errors.MLRunInvalidArgumentTypeError(
367
- f"{name} should be of type Dict[{get_pretty_types_names(expected_keys_types)},"
371
+ f"'{name}' should be of type Dict[{get_pretty_types_names(expected_keys_types)},"
368
372
  f"{get_pretty_types_names(expected_values_types)}]."
369
373
  ) from exc
370
374
 
@@ -407,7 +411,7 @@ def normalize_name(name: str, verbose: bool = True):
407
411
  if verbose:
408
412
  warnings.warn(
409
413
  "Names with underscore '_' are about to be deprecated, use dashes '-' instead. "
410
- f"Replacing {name} underscores with dashes.",
414
+ f"Replacing '{name}' underscores with dashes.",
411
415
  FutureWarning,
412
416
  )
413
417
  name = name.replace("_", "-")
@@ -669,7 +673,7 @@ def parse_artifact_uri(uri, default_project=""):
669
673
  iteration = int(iteration)
670
674
  except ValueError:
671
675
  raise ValueError(
672
- f"illegal store path {uri}, iteration must be integer value"
676
+ f"illegal store path '{uri}', iteration must be integer value"
673
677
  )
674
678
  return (
675
679
  group_dict["project"] or default_project,
@@ -1199,7 +1203,7 @@ def get_function(function, namespace):
1199
1203
  function_object = create_function(function)
1200
1204
  except (ImportError, ValueError) as exc:
1201
1205
  raise ImportError(
1202
- f"state/function init failed, handler {function} not found"
1206
+ f"state/function init failed, handler '{function}' not found"
1203
1207
  ) from exc
1204
1208
  return function_object
1205
1209
 
@@ -1383,7 +1387,7 @@ def get_in_artifact(artifact: dict, key, default=None, raise_on_missing=False):
1383
1387
 
1384
1388
  if raise_on_missing:
1385
1389
  raise mlrun.errors.MLRunInvalidArgumentError(
1386
- f"artifact {artifact} is missing metadata/spec/status"
1390
+ f"artifact '{artifact}' is missing metadata/spec/status"
1387
1391
  )
1388
1392
  return default
1389
1393
 
@@ -1419,7 +1423,7 @@ def is_running_in_jupyter_notebook() -> bool:
1419
1423
 
1420
1424
  def as_number(field_name, field_value):
1421
1425
  if isinstance(field_value, str) and not field_value.isnumeric():
1422
- raise ValueError(f"{field_name} must be numeric (str/int types)")
1426
+ raise ValueError(f"'{field_name}' must be numeric (str/int types)")
1423
1427
  return int(field_value)
1424
1428
 
1425
1429
 
@@ -1,4 +1,4 @@
1
1
  {
2
- "git_commit": "420a54197d2ce4d81806ce83dbaeacf0e275bf2e",
3
- "version": "1.6.0-rc11"
2
+ "git_commit": "abca3cad961e987ec75d8ecfcdf0b6856dc9b7d1",
3
+ "version": "1.6.0-rc13"
4
4
  }