code-loader 1.0.143.dev3.tar.gz → 1.0.144.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of code-loader might be problematic.

Files changed (36)
  1. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/PKG-INFO +1 -1
  2. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/contract/datasetclasses.py +1 -9
  3. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/inner_leap_binder/leapbinder_decorators.py +108 -471
  4. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/pyproject.toml +1 -1
  5. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/LICENSE +0 -0
  6. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/README.md +0 -0
  7. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/__init__.py +0 -0
  8. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/contract/__init__.py +0 -0
  9. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/contract/enums.py +0 -0
  10. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/contract/exceptions.py +0 -0
  11. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/contract/mapping.py +0 -0
  12. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/contract/responsedataclasses.py +0 -0
  13. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/contract/visualizer_classes.py +0 -0
  14. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/default_losses.py +0 -0
  15. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/default_metrics.py +0 -0
  16. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/__init__.py +0 -0
  17. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/api.py +0 -0
  18. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/cli_config_utils.py +0 -0
  19. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/client.py +0 -0
  20. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/epoch.py +0 -0
  21. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/experiment.py +0 -0
  22. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/experiment_context.py +0 -0
  23. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/types.py +0 -0
  24. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/utils.py +0 -0
  25. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/experiment_api/workingspace_config_utils.py +0 -0
  26. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/inner_leap_binder/__init__.py +0 -0
  27. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/inner_leap_binder/leapbinder.py +0 -0
  28. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/leaploader.py +0 -0
  29. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/leaploaderbase.py +0 -0
  30. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/mixpanel_tracker.py +0 -0
  31. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/plot_functions/__init__.py +0 -0
  32. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/plot_functions/plot_functions.py +0 -0
  33. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/plot_functions/visualize.py +0 -0
  34. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/utils.py +0 -0
  35. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/visualizers/__init__.py +0 -0
  36. {code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/visualizers/default_visualizers.py +0 -0
{code_loader-1.0.143.dev3 → code_loader-1.0.144}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: code-loader
- Version: 1.0.143.dev3
+ Version: 1.0.144
  Summary:
  Home-page: https://github.com/tensorleap/code-loader
  License: MIT
{code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/contract/datasetclasses.py
@@ -1,4 +1,3 @@
- import warnings
  from dataclasses import dataclass, field
  from typing import Any, Callable, List, Optional, Dict, Union, Type
  import re
@@ -57,14 +56,7 @@ class PreprocessResponse:
  for sample_id in self.sample_ids:
  assert isinstance(sample_id, str), f"Sample id should be of type str. Got: {type(sample_id)}"
  else:
- raise Exception("length is deprecated, please use sample_ids instead.")
-
- if self.state is None:
- warnings.warn(
- "PreprocessResponse.state is not set. For best practice, assign a unique `state` value to each PreprocessResponse instance."
- )
- else:
- assert isinstance(self.state, DataStateType), f"PreprocessResponse.state must be of type {DataStateType.__name__} but got {type(self.state)}"
+ raise Exception("length is deprecated.")

  def __hash__(self) -> int:
  return id(self)
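The removed message above pointed users from the deprecated `length` argument to `sample_ids`, and the dropped warning recommended giving each response a distinct `state`. A minimal sketch of constructing responses that way; the `data` keyword and the DataStateType member names are assumptions not shown in this diff:

    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.contract.enums import DataStateType  # assumed import path

    def build_responses():
        # sample_ids replaces the deprecated length; each id is a str here
        train = PreprocessResponse(sample_ids=[str(i) for i in range(80)],
                                   data={'split': 'train'},          # assumed field
                                   state=DataStateType.training)      # assumed member name
        val = PreprocessResponse(sample_ids=[str(i) for i in range(80, 100)],
                                 data={'split': 'val'},
                                 state=DataStateType.validation)
        return [train, val]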
{code_loader-1.0.143.dev3 → code_loader-1.0.144}/code_loader/inner_leap_binder/leapbinder_decorators.py
@@ -1,12 +1,10 @@
  # mypy: ignore-errors
  import os
- import warnings
  import logging
  from collections import defaultdict
  from functools import lru_cache
  from pathlib import Path
  from typing import Optional, Union, Callable, List, Dict, Set, Any
- from typing import Optional, Union, Callable, List, Dict, get_args, get_origin

  import numpy as np
  import numpy.typing as npt
@@ -31,116 +29,8 @@ import functools

  _called_from_inside_tl_decorator = 0
  _called_from_inside_tl_integration_test_decorator = False
- _call_from_tl_platform= os.environ.get('IS_TENSORLEAP_PLATFORM') == 'true'
-
-
-
-
- def validate_args_structure(*args, types_order, func_name, expected_names, **kwargs):
- def _type_to_str(t):
- origin = get_origin(t)
- if origin is Union:
- return " | ".join(tt.__name__ for tt in get_args(t))
- elif hasattr(t, "__name__"):
- return t.__name__
- else:
- return str(t)
-
- def _format_types(types, names=None):
- return ", ".join(
- f"{(names[i] + ': ') if names else f'arg{i}: '}{_type_to_str(ty)}"
- for i, ty in enumerate(types)
- )
-
- if expected_names:
- normalized_args = []
- for i, name in enumerate(expected_names):
- if i < len(args):
- normalized_args.append(args[i])
- elif name in kwargs:
- normalized_args.append(kwargs[name])
- else:
- raise AssertionError(
- f"{func_name} validation failed: "
- f"Missing required argument '{name}'. "
- f"Expected arguments: {expected_names}."
- )
- else:
- normalized_args = list(args)
- if len(normalized_args) != len(types_order):
- expected = _format_types(types_order, expected_names)
- got_types = ", ".join(type(arg).__name__ for arg in normalized_args)
- raise AssertionError(
- f"{func_name} validation failed: "
- f"Expected exactly {len(types_order)} arguments ({expected}), "
- f"but got {len(normalized_args)} argument(s) of type(s): ({got_types}). "
- f"Correct usage example: {func_name}({expected})"
- )
-
- for i, (arg, expected_type) in enumerate(zip(normalized_args, types_order)):
- origin = get_origin(expected_type)
- if origin is Union:
- allowed_types = get_args(expected_type)
- else:
- allowed_types = (expected_type,)
-
- if not isinstance(arg, allowed_types):
- allowed_str = " | ".join(t.__name__ for t in allowed_types)
- raise AssertionError(
- f"{func_name} validation failed: "
- f"Argument '{expected_names[i] if expected_names else f'arg{i}'}' "
- f"expected type {allowed_str}, but got {type(arg).__name__}. "
- f"Correct usage example: {func_name}({_format_types(types_order, expected_names)})"
- )
-
-
- def validate_output_structure(result, func_name: str, expected_type_name="np.ndarray",gt_flag=False):
- if result is None or (isinstance(result, float) and np.isnan(result)):
- if gt_flag:
- raise AssertionError(
- f"{func_name} validation failed: "
- f"The function returned {result!r}. "
- f"If you are working with an unlabeled dataset and no ground truth is available, "
- f"use 'return np.array([], dtype=np.float32)' instead. "
- f"Otherwise, {func_name} expected a single {expected_type_name} object. "
- f"Make sure the function ends with 'return <{expected_type_name}>'."
- )
-
- raise AssertionError(
- f"{func_name} validation failed: "
- f"The function returned None. "
- f"Expected a single {expected_type_name} object. "
- f"Make sure the function ends with 'return <{expected_type_name}>'."
- )
- if isinstance(result, tuple):
- element_descriptions = [
- f"[{i}] type: {type(r).__name__}"
- for i, r in enumerate(result)
- ]
- element_summary = "\n ".join(element_descriptions)
-
- raise AssertionError(
- f"{func_name} validation failed: "
- f"The function returned multiple outputs ({len(result)} values), "
- f"but only a single {expected_type_name} is allowed.\n\n"
- f"Returned elements:\n"
- f" {element_summary}\n\n"
- f"Correct usage example:\n"
- f" def {func_name}(...):\n"
- f" return <{expected_type_name}>\n\n"
- f"If you intended to return multiple values, combine them into a single "
- f"{expected_type_name} (e.g., by concatenation or stacking)."
- )
-
- def batch_warning(result, func_name):
- if result.shape[0] == 1:
- warnings.warn(
- f"{func_name} warning: Tensorleap will add a batch dimension at axis 0 to the output of {func_name}, "
- f"although the detected size of axis 0 is already 1. "
- f"This may lead to an extra batch dimension (e.g., shape (1, 1, ...)). "
- f"Please ensure that the output of '{func_name}' is not already batched "
- f"to avoid computation errors."
- )
+
+
  def _add_mapping_connection(user_unique_name, connection_destinations, arg_names, name, node_mapping_type):
  connection_destinations = [connection_destination for connection_destination in connection_destinations
  if not isinstance(connection_destination, SamplePreprocessResponse)]
@@ -163,19 +53,7 @@ def tensorleap_integration_test():
  def decorating_function(integration_test_function: Callable):
  leap_binder.integration_test_func = integration_test_function

- def _validate_input_args(*args, **kwargs):
- sample_id,preprocess_response=args
- assert type(sample_id) == preprocess_response.sample_id_type, (
- f"tensorleap_integration_test validation failed: "
- f"sample_id type ({type(sample_id).__name__}) does not match the expected "
- f"type ({preprocess_response.sample_id_type}) from the PreprocessResponse."
- )
-
  def inner(*args, **kwargs):
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
- func_name='integration_test',expected_names=["idx", "preprocess"],**kwargs)
- _validate_input_args(*args, **kwargs)
-
  global _called_from_inside_tl_integration_test_decorator
  # Clear integration test events for new test
  try:
@@ -184,8 +62,6 @@ def tensorleap_integration_test():
  logger.debug(f"Failed to clear integration events: {e}")
  try:
  _called_from_inside_tl_integration_test_decorator = True
- if not _call_from_tl_platform:
- update_env_params_func("tensorleap_integration_test", "v")#put here because otherwise it will become v only if it finishes all the script
  ret = integration_test_function(*args, **kwargs)

  try:
@@ -198,7 +74,7 @@ def tensorleap_integration_test():
  line_number = first_tb.lineno
  if isinstance(e, TypeError) and 'is not subscriptable' in str(e):
  print(f'Invalid integration code. File {file_name}, line {line_number}: '
- f"indexing is supported only on the model's predictions inside the integration test. Please remove this indexing operation usage from the integration test code.")
+ f'Please remove this indexing operation usage from the integration test code.')
  else:
  print(f'Invalid integration code. File {file_name}, line {line_number}: '
  f'Integration test is only allowed to call Tensorleap decorators. '
@@ -210,8 +86,8 @@ def tensorleap_integration_test():
  _called_from_inside_tl_integration_test_decorator = False

  leap_binder.check()
- return inner

+ return inner

  return decorating_function
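For orientation, a hypothetical integration test under this decorator: per the removed checks it is called with (idx, preprocess), and the surviving error messages say it may only call Tensorleap-decorated functions, with indexing allowed only on the model's predictions. The names input_encoder, gt_encoder, loss_func and load_model are illustrative stand-ins for functions decorated elsewhere.

    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_integration_test

    @tensorleap_integration_test()
    def integration_test(idx, preprocess):
        model = load_model()                    # @tensorleap_load_model
        x = input_encoder(idx, preprocess)      # @tensorleap_input_encoder
        gt = gt_encoder(idx, preprocess)        # @tensorleap_gt_encoder
        prediction = model(x)                   # indexing only on predictions
        return loss_func(prediction, gt)        # @tensorleap_custom_loss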

@@ -221,63 +97,34 @@ def _safe_get_item(key):
  except ValueError:
  raise Exception(f'Tensorleap currently supports models with no more then 10 inputs')

+
  def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]] = []):
- assert isinstance(prediction_types, list),(
- f"tensorleap_load_model validation failed: "
- f" prediction_types is an optional argument of type List[PredictionTypeHandler]] but got {type(prediction_types).__name__}."
- )
  for i, prediction_type in enumerate(prediction_types):
- assert isinstance(prediction_type, PredictionTypeHandler),(f"tensorleap_load_model validation failed: "
- f" prediction_types at position {i} must be of type PredictionTypeHandler but got {type(prediction_types[i]).__name__}.")
  leap_binder.add_prediction(prediction_type.name, prediction_type.labels, prediction_type.channel_dim, i)

- def _validate_result(result) -> None:
- valid_types=["onnxruntime","keras"]
- err_message=f"tensorleap_load_model validation failed:\nSupported models are Keras and onnxruntime only and non of them was returned."
- validate_output_structure(result, func_name="tensorleap_load_model", expected_type_name= [" | ".join(t for t in valid_types)][0])
- try:
- import keras
- except ImportError:
- keras = None
- try:
- import tensorflow as tf
- except ImportError:
- tf = None
- try:
- import onnxruntime
- except ImportError:
- onnxruntime = None
-
- if not keras and not onnxruntime:
- raise AssertionError(err_message)
-
- is_keras_model = (
- bool(keras and isinstance(result, getattr(keras, "Model", tuple())))
- or bool(tf and isinstance(result, getattr(tf.keras, "Model", tuple())))
- )
- is_onnx_model = bool(onnxruntime and isinstance(result, onnxruntime.InferenceSession))
-
- if not any([is_keras_model, is_onnx_model]):
- raise AssertionError( err_message)
-
-
-
  def decorating_function(load_model_func):
  class TempMapping:
  pass

  @lru_cache()
- def inner(*args, **kwargs):
- validate_args_structure(*args, types_order=[],
- func_name='tensorleap_load_model',expected_names=[],**kwargs)
+ def inner():
  class ModelPlaceholder:
  def __init__(self):
- self.model = load_model_func() #TODO- check why this fails on onnx model
- _validate_result(self.model)
+ self.model = load_model_func()
+ # Emit integration test event once per test
+ try:
+ emit_integration_event_once(AnalyticsEvent.LOAD_MODEL_INTEGRATION_TEST, {
+ 'prediction_types_count': len(prediction_types)
+ })
+ except Exception as e:
+ logger.debug(f"Failed to emit load_model integration test event: {e}")

  # keras interface
  def __call__(self, arg):
  ret = self.model(arg)
+ if isinstance(ret, list):
+ return [r.numpy() for r in ret]
+
  return ret.numpy()

  def _convert_onnx_inputs_to_correct_type(
@@ -337,10 +184,8 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]

  def get_inputs(self):
  return self.model.get_inputs()
- model_placeholder=ModelPlaceholder()
- if not _call_from_tl_platform:
- update_env_params_func("tensorleap_load_model", "v")
- return model_placeholder
+
+ return ModelPlaceholder()

  def mapping_inner():
  class ModelOutputPlaceholder:
@@ -403,11 +248,12 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]

  return ModelPlaceholder()

- def final_inner(*args, **kwargs):
+ def final_inner():
  if os.environ.get(mapping_runtime_mode_env_var_mame):
  return mapping_inner()
  else:
- return inner(*args, **kwargs)
+ return inner()
+
  return final_inner

  return decorating_function
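A hedged sketch of the model-loading side: the removed validation above accepted either a Keras model or an onnxruntime.InferenceSession, and prediction_types entries expose name/labels/channel_dim. The PredictionTypeHandler import path and constructor keywords, and the 'model.h5' path, are assumptions.

    from code_loader.contract.datasetclasses import PredictionTypeHandler  # assumed import path
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_load_model

    @tensorleap_load_model(prediction_types=[
        PredictionTypeHandler(name='classes', labels=['cat', 'dog'], channel_dim=-1)])
    def load_model():
        import tensorflow as tf
        # An onnxruntime.InferenceSession would also satisfy the (removed) check above.
        return tf.keras.models.load_model('model.h5')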
@@ -418,168 +264,81 @@ def tensorleap_custom_metric(name: str,
  compute_insights: Optional[Union[bool, Dict[str, bool]]] = None,
  connects_to=None):
  name_to_unique_name = defaultdict(set)
+
  def decorating_function(
  user_function: Union[CustomCallableInterfaceMultiArgs, CustomMultipleReturnCallableInterfaceMultiArgs,
  ConfusionMatrixCallableInterfaceMultiArgs]):
-
- def _validate_decorators_signature():
- err_message = f"{user_function.__name__} validation failed.\n"
- if not isinstance(name, str):
- raise TypeError(err_message + f"`name` must be a string, got type {type(name).__name__}.")
- valid_directions = {MetricDirection.Upward, MetricDirection.Downward}
- if isinstance(direction, MetricDirection):
- if direction not in valid_directions:
- raise ValueError(
- err_message +
- f"Invalid MetricDirection: {direction}. Must be one of {valid_directions}, "
- f"got type {type(direction).__name__}."
- )
- elif isinstance(direction, dict):
- if not all(isinstance(k, str) for k in direction.keys()):
- invalid_keys = {k: type(k).__name__ for k in direction.keys() if not isinstance(k, str)}
- raise TypeError(
- err_message +
- f"All keys in `direction` must be strings, got invalid key types: {invalid_keys}."
- )
- for k, v in direction.items():
- if v not in valid_directions:
- raise ValueError(
- err_message +
- f"Invalid direction for key '{k}': {v}. Must be one of {valid_directions}, "
- f"got type {type(v).__name__}."
- )
- else:
- raise TypeError(
- err_message +
- f"`direction` must be a MetricDirection or a Dict[str, MetricDirection], "
- f"got type {type(direction).__name__}."
- )
- if compute_insights is not None:
- if not isinstance(compute_insights, (bool, dict)):
- raise TypeError(
- err_message +
- f"`compute_insights` must be a bool or a Dict[str, bool], "
- f"got type {type(compute_insights).__name__}."
- )
- if isinstance(compute_insights, dict):
- if not all(isinstance(k, str) for k in compute_insights.keys()):
- invalid_keys = {k: type(k).__name__ for k in compute_insights.keys() if not isinstance(k, str)}
- raise TypeError(
- err_message +
- f"All keys in `compute_insights` must be strings, got invalid key types: {invalid_keys}."
- )
- for k, v in compute_insights.items():
- if not isinstance(v, bool):
- raise TypeError(
- err_message +
- f"Invalid type for compute_insights['{k}']: expected bool, got type {type(v).__name__}."
- )
- if connects_to is not None:
- valid_types = (str, list, tuple, set)
- if not isinstance(connects_to, valid_types):
- raise TypeError(
- err_message +
- f"`connects_to` must be one of {valid_types}, got type {type(connects_to).__name__}."
- )
- if isinstance(connects_to, (list, tuple, set)):
- invalid_elems = [f"{type(e).__name__}" for e in connects_to if not isinstance(e, str)]
- if invalid_elems:
- raise TypeError(
- err_message +
- f"All elements in `connects_to` must be strings, "
- f"but found element types: {invalid_elems}."
- )
-
-
- _validate_decorators_signature()
-
  for metric_handler in leap_binder.setup_container.metrics:
  if metric_handler.metric_handler_data.name == name:
  raise Exception(f'Metric with name {name} already exists. '
  f'Please choose another')

  def _validate_input_args(*args, **kwargs) -> None:
- assert len(args)+len(kwargs) > 0, (
- f"{user_function.__name__}() validation failed: "
- f"Expected at least one positional|key-word argument of type np.ndarray, "
- f"but received none. "
- f"Correct usage example: tensorleap_custom_metric(input_array: np.ndarray, ...)"
- )
  for i, arg in enumerate(args):
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
- f'{user_function.__name__}() validation failed: '
+ f'tensorleap_custom_metric validation failed: '
  f'Argument #{i} should be a numpy array. Got {type(arg)}.')
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
  assert arg.shape[0] == leap_binder.batch_size_to_validate, \
- (f'{user_function.__name__}() validation failed: Argument #{i} '
+ (f'tensorleap_custom_metric validation failed: Argument #{i} '
  f'first dim should be as the batch size. Got {arg.shape[0]} '
  f'instead of {leap_binder.batch_size_to_validate}')

  for _arg_name, arg in kwargs.items():
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
- f'{user_function.__name__}() validation failed: '
+ f'tensorleap_custom_metric validation failed: '
  f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
  assert arg.shape[0] == leap_binder.batch_size_to_validate, \
- (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
+ (f'tensorleap_custom_metric validation failed: Argument {_arg_name} '
  f'first dim should be as the batch size. Got {arg.shape[0]} '
  f'instead of {leap_binder.batch_size_to_validate}')

  def _validate_result(result) -> None:
- validate_output_structure(result, func_name=user_function.__name__,
- expected_type_name="List[float | int | None | List[ConfusionMatrixElement] ] | NDArray[np.float32] or dictonary with one of these types as its values types")
- supported_types_message = (f'{user_function.__name__}() validation failed: '
- f'{user_function.__name__}() has returned unsupported type.\nSupported types are List[float|int|None], '
- f'List[List[ConfusionMatrixElement]], NDArray[np.float32] or dictonary with one of these types as its values types. ')
+ supported_types_message = (f'tensorleap_custom_metric validation failed: '
+ f'Metric has returned unsupported type. Supported types are List[float], '
+ f'List[List[ConfusionMatrixElement]], NDArray[np.float32]. ')

- def _validate_single_metric(single_metric_result,key=None):
+ def _validate_single_metric(single_metric_result):
  if isinstance(single_metric_result, list):
  if isinstance(single_metric_result[0], list):
- assert all(isinstance(cm, ConfusionMatrixElement) for cm in single_metric_result[0]), (
- f"{supported_types_message} "
- f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
- f"List[List[{', '.join(type(cm).__name__ for cm in single_metric_result[0])}]]."
- )
-
+ assert isinstance(single_metric_result[0][0], ConfusionMatrixElement), \
+ f'{supported_types_message}Got List[List[{type(single_metric_result[0][0])}]].'
  else:
- assert all(isinstance(v, (float,int,type(None),np.float32)) for v in single_metric_result), (
- f"{supported_types_message}\n"
- f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
- f"List[{', '.join(type(v).__name__ for v in single_metric_result)}]."
- )
+ assert isinstance(single_metric_result[0], (
+ float, int,
+ type(None))), f'{supported_types_message}Got List[{type(single_metric_result[0])}].'
  else:
  assert isinstance(single_metric_result,
- np.ndarray), f'{supported_types_message}\nGot {type(single_metric_result)}.'
- assert len(single_metric_result.shape) == 1, (f'{user_function.__name__}() validation failed: '
+ np.ndarray), f'{supported_types_message}Got {type(single_metric_result)}.'
+ assert len(single_metric_result.shape) == 1, (f'tensorleap_custom_metric validation failed: '
  f'The return shape should be 1D. Got {len(single_metric_result.shape)}D.')

  if leap_binder.batch_size_to_validate:
  assert len(single_metric_result) == leap_binder.batch_size_to_validate, \
- f'{user_function.__name__}() validation failed: The return len {f"of srt{key} value" if key is not None else ""} should be as the batch size.'
+ f'tensorleap_custom_metrix validation failed: The return len should be as the batch size.'

  if isinstance(result, dict):
  for key, value in result.items():
- _validate_single_metric(value,key)
-
  assert isinstance(key, str), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'Keys in the return dict should be of type str. Got {type(key)}.')
  _validate_single_metric(value)

  if isinstance(direction, dict):
  for direction_key in direction:
  assert direction_key in result, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'Keys in the direction mapping should be part of result keys. Got key {direction_key}.')

  if compute_insights is not None:
  assert isinstance(compute_insights, dict), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'compute_insights should be dict if using the dict results. Got {type(compute_insights)}.')

  for ci_key in compute_insights:
  assert ci_key in result, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'Keys in the compute_insights mapping should be part of result keys. Got key {ci_key}.')

  else:
@@ -587,7 +346,7 @@ def tensorleap_custom_metric(name: str,

  if compute_insights is not None:
  assert isinstance(compute_insights, bool), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'compute_insights should be boolean. Got {type(compute_insights)}.')

  @functools.wraps(user_function)
@@ -619,8 +378,6 @@ def tensorleap_custom_metric(name: str,
  result = inner_without_validate(*args, **kwargs)

  _validate_result(result)
- if not _call_from_tl_platform:
- update_env_params_func("tensorleap_custom_metric","v")
  return result

  def mapping_inner(*args, **kwargs):
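A minimal custom metric consistent with the checks above: batched numpy arrays in, a 1-D array of batch length out (List[float] or a str-keyed dict of such results is also accepted). MetricDirection appears in the removed validation; its import path and the argument names here are assumptions.

    import numpy as np
    from code_loader.contract.enums import MetricDirection  # assumed import path
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_metric

    @tensorleap_custom_metric('accuracy', direction=MetricDirection.Upward)
    def accuracy_metric(prediction: np.ndarray, gt: np.ndarray) -> np.ndarray:
        # arguments arrive batched; return one value per sample
        correct = np.argmax(prediction, axis=-1) == np.argmax(gt, axis=-1)
        return correct.astype(np.float32)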
@@ -660,38 +417,28 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
  name_to_unique_name = defaultdict(set)

  def decorating_function(user_function: VisualizerCallableInterface):
- assert isinstance(visualizer_type,LeapDataType),(f"{user_function.__name__} validation failed: "
- f"visualizer_type should be of type {LeapDataType.__name__} but got {type(visualizer_type)}"
- )
-
  for viz_handler in leap_binder.setup_container.visualizers:
  if viz_handler.visualizer_handler_data.name == name:
  raise Exception(f'Visualizer with name {name} already exists. '
  f'Please choose another')

  def _validate_input_args(*args, **kwargs):
- assert len(args) + len(kwargs) > 0, (
- f"{user_function.__name__}() validation failed: "
- f"Expected at least one positional|key-word argument of type np.ndarray, "
- f"but received none. "
- f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
- )
  for i, arg in enumerate(args):
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
- f'{user_function.__name__}() validation failed: '
+ f'tensorleap_custom_visualizer validation failed: '
  f'Argument #{i} should be a numpy array. Got {type(arg)}.')
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
  assert arg.shape[0] != leap_binder.batch_size_to_validate, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_visualizer validation failed: '
  f'Argument #{i} should be without batch dimension. ')

  for _arg_name, arg in kwargs.items():
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
- f'{user_function.__name__}() validation failed: '
+ f'tensorleap_custom_visualizer validation failed: '
  f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
  assert arg.shape[0] != leap_binder.batch_size_to_validate, \
- (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
+ (f'tensorleap_custom_visualizer validation failed: Argument {_arg_name} '
  f'should be without batch dimension. ')

  def _validate_result(result):
@@ -705,11 +452,8 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
  LeapDataType.ImageWithBBox: LeapImageWithBBox,
  LeapDataType.ImageWithHeatmap: LeapImageWithHeatmap
  }
- validate_output_structure(result, func_name=user_function.__name__,
- expected_type_name=result_type_map[visualizer_type])
-
  assert isinstance(result, result_type_map[visualizer_type]), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_visualizer validation failed: '
  f'The return type should be {result_type_map[visualizer_type]}. Got {type(result)}.')

  @functools.wraps(user_function)
@@ -741,8 +485,6 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
  result = inner_without_validate(*args, **kwargs)

  _validate_result(result)
- if not _call_from_tl_platform:
- update_env_params_func("tensorleap_custom_visualizer","v")
  return result

  def mapping_inner(*args, **kwargs):
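An image visualizer sketch matching the rules above: arguments come without a batch dimension and the return type must match the declared visualizer_type via result_type_map. LeapDataType.Image and the LeapImage class are assumptions here; only the ImageWithBBox/ImageWithHeatmap entries are visible in this hunk.

    import numpy as np
    from code_loader.contract.enums import LeapDataType              # assumed import path
    from code_loader.contract.visualizer_classes import LeapImage    # assumed class
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_visualizer

    @tensorleap_custom_visualizer('image_vis', LeapDataType.Image)
    def image_visualizer(image: np.ndarray) -> LeapImage:
        # single, unbatched sample; clamp to [0, 1] before handing it to the viewer
        return LeapImage(np.clip(image, 0.0, 1.0).astype(np.float32))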
@@ -784,26 +526,30 @@ def tensorleap_metadata(
  f'Please choose another')

  def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
+ assert isinstance(sample_id, (int, str)), \
+ (f'tensorleap_metadata validation failed: '
+ f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
+ assert isinstance(preprocess_response, PreprocessResponse), \
+ (f'tensorleap_metadata validation failed: '
+ f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
  assert type(sample_id) == preprocess_response.sample_id_type, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_metadata validation failed: '
  f'Argument sample_id should be as the same type as defined in the preprocess response '
  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')

  def _validate_result(result):
  supported_result_types = (type(None), int, str, bool, float, dict, np.floating,
  np.bool_, np.unsignedinteger, np.signedinteger, np.integer)
- validate_output_structure(result, func_name=user_function.__name__,
- expected_type_name=supported_result_types)
  assert isinstance(result, supported_result_types), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_metadata validation failed: '
  f'Unsupported return type. Got {type(result)}. should be any of {str(supported_result_types)}')
  if isinstance(result, dict):
  for key, value in result.items():
  assert isinstance(key, str), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_metadata validation failed: '
  f'Keys in the return dict should be of type str. Got {type(key)}.')
  assert isinstance(value, supported_result_types), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_metadata validation failed: '
  f'Values in the return dict should be of type {str(supported_result_types)}. Got {type(value)}.')

  def inner_without_validate(sample_id, preprocess_response):
@@ -820,19 +566,15 @@ def tensorleap_metadata(

  leap_binder.set_metadata(inner_without_validate, name, metadata_type)

- def inner(*args,**kwargs):
+ def inner(sample_id, preprocess_response):
  if os.environ.get(mapping_runtime_mode_env_var_mame):
  return None
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
- func_name=user_function.__name__, expected_names=["idx", "preprocess"],**kwargs)
- sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
+
  _validate_input_args(sample_id, preprocess_response)

  result = inner_without_validate(sample_id, preprocess_response)

  _validate_result(result)
- if not _call_from_tl_platform:
- update_env_params_func("tensorleap_metadata","v")
  return result

  return inner
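A small metadata callback following the validation above: it receives (sample_id, preprocess_response) and may return a scalar or a str-keyed dict of scalars. The 'split' entry read from preprocess_response.data is an assumption.

    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_metadata

    @tensorleap_metadata('sample_info')
    def sample_metadata(sample_id, preprocess_response: PreprocessResponse) -> dict:
        return {
            'sample_id': str(sample_id),
            'split': str(preprocess_response.data.get('split', 'unknown')),  # assumed field
        }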
@@ -894,24 +636,20 @@ def tensorleap_preprocess():
  leap_binder.set_preprocess(user_function)

  def _validate_input_args(*args, **kwargs):
- assert len(args) + len(kwargs) > 0, \
- (f'{user_function.__name__}() validation failed: '
+ assert len(args) == 0 and len(kwargs) == 0, \
+ (f'tensorleap_preprocess validation failed: '
  f'The function should not take any arguments. Got {args} and {kwargs}.')

  def _validate_result(result):
- assert isinstance(result, list), (
- f"{user_function.__name__}() validation failed: expected return type list[{PreprocessResponse.__name__}]"
- f"(e.g., [PreprocessResponse1, PreprocessResponse2, ...]), but returned type is {type(result).__name__}."
- if not isinstance(result, tuple)
- else f"{user_function.__name__}() validation failed: expected to return a single list[{PreprocessResponse.__name__}] object, "
- f"but returned {len(result)} objects instead."
- )
+ assert isinstance(result, list), \
+ (f'tensorleap_preprocess validation failed: '
+ f'The return type should be a list. Got {type(result)}.')
  for i, response in enumerate(result):
  assert isinstance(response, PreprocessResponse), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_preprocess validation failed: '
  f'Element #{i} in the return list should be a PreprocessResponse. Got {type(response)}.')
  assert len(set(result)) == len(result), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_preprocess validation failed: '
  f'The return list should not contain duplicate PreprocessResponse objects.')

  def inner(*args, **kwargs):
@@ -919,6 +657,7 @@ def tensorleap_preprocess():
  return [None, None, None, None]

  _validate_input_args(*args, **kwargs)
+
  result = user_function()
  _validate_result(result)

@@ -929,8 +668,7 @@ def tensorleap_preprocess():
  })
  except Exception as e:
  logger.debug(f"Failed to emit preprocess integration test event: {e}")
- if not _call_from_tl_platform:
- update_env_params_func("tensorleap_preprocess", "v")
+
  return result

  return inner
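A sketch of the preprocess entry point under the rules above: the decorated function takes no arguments and returns a list of distinct PreprocessResponse objects (constructor keywords beyond sample_ids are omitted; see the earlier sketch).

    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_preprocess

    @tensorleap_preprocess()
    def preprocess_func():
        train = PreprocessResponse(sample_ids=[str(i) for i in range(80)])
        val = PreprocessResponse(sample_ids=[str(i) for i in range(80, 100)])
        return [train, val]   # distinct, non-duplicate responses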
@@ -1129,23 +867,29 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
  raise Exception(f"Channel dim for input {name} is expected to be either -1 or positive")

  def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
+ assert isinstance(sample_id, (int, str)), \
+ (f'tensorleap_input_encoder validation failed: '
+ f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
+ assert isinstance(preprocess_response, PreprocessResponse), \
+ (f'tensorleap_input_encoder validation failed: '
+ f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
  assert type(sample_id) == preprocess_response.sample_id_type, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_input_encoder validation failed: '
  f'Argument sample_id should be as the same type as defined in the preprocess response '
  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')

  def _validate_result(result):
- validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray")
  assert isinstance(result, np.ndarray), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_input_encoder validation failed: '
  f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
  assert result.dtype == np.float32, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_input_encoder validation failed: '
  f'The return type should be a numpy array of type float32. Got {result.dtype}.')
- assert channel_dim - 1 <= len(result.shape), (f'{user_function.__name__}() validation failed: '
+ assert channel_dim - 1 <= len(result.shape), (f'tensorleap_input_encoder validation failed: '
  f'The channel_dim ({channel_dim}) should be <= to the rank of the resulting input rank ({len(result.shape)}).')

  def inner_without_validate(sample_id, preprocess_response):
+
  global _called_from_inside_tl_decorator
  _called_from_inside_tl_decorator += 1

@@ -1159,10 +903,7 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
  leap_binder.set_input(inner_without_validate, name, channel_dim=channel_dim)


- def inner(*args, **kwargs):
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
- func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
- sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
+ def inner(sample_id, preprocess_response):
  _validate_input_args(sample_id, preprocess_response)

  result = inner_without_validate(sample_id, preprocess_response)
@@ -1170,7 +911,6 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
  _validate_result(result)

  if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
- batch_warning(result,user_function.__name__)
  result = np.expand_dims(result, axis=0)
  # Emit integration test event once per test
  try:
@@ -1181,18 +921,17 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
  })
  except Exception as e:
  logger.debug(f"Failed to emit input_encoder integration test event: {e}")
- if not _call_from_tl_platform:
- update_env_params_func("tensorleap_input_encoder", "v")

  return result


+
  node_mapping_type = NodeMappingType.Input
  if model_input_index is not None:
  node_mapping_type = NodeMappingType(f'Input{str(model_input_index)}')
  inner.node_mapping = NodeMapping(name, node_mapping_type)

- def mapping_inner(*args, **kwargs):
+ def mapping_inner(sample_id, preprocess_response):
  class TempMapping:
  pass

@@ -1204,11 +943,11 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):

  mapping_inner.node_mapping = NodeMapping(name, node_mapping_type)

- def final_inner(*args, **kwargs):
+ def final_inner(sample_id, preprocess_response):
  if os.environ.get(mapping_runtime_mode_env_var_mame):
- return mapping_inner(*args, **kwargs)
+ return mapping_inner(sample_id, preprocess_response)
  else:
- return inner(*args, **kwargs)
+ return inner(sample_id, preprocess_response)

  final_inner.node_mapping = NodeMapping(name, node_mapping_type)
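An input encoder sketched from the assertions above: (sample_id, preprocess_response) in, one unbatched np.float32 array out; the batch axis is added by the decorator during integration tests. load_image is a hypothetical helper.

    import numpy as np
    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_input_encoder

    @tensorleap_input_encoder('image', channel_dim=-1)
    def input_encoder(sample_id: str, preprocess_response: PreprocessResponse) -> np.ndarray:
        image = load_image(sample_id)        # hypothetical helper
        return image.astype(np.float32)      # no batch axis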

@@ -1225,18 +964,23 @@ def tensorleap_gt_encoder(name: str):
  f'Please choose another')

  def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
+ assert isinstance(sample_id, (int, str)), \
+ (f'tensorleap_gt_encoder validation failed: '
+ f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
+ assert isinstance(preprocess_response, PreprocessResponse), \
+ (f'tensorleap_gt_encoder validation failed: '
+ f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
  assert type(sample_id) == preprocess_response.sample_id_type, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_gt_encoder validation failed: '
  f'Argument sample_id should be as the same type as defined in the preprocess response '
  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')

  def _validate_result(result):
- validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray",gt_flag=True)
  assert isinstance(result, np.ndarray), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_gt_encoder validation failed: '
  f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
  assert result.dtype == np.float32, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_gt_encoder validation failed: '
  f'The return type should be a numpy array of type float32. Got {result.dtype}.')

  def inner_without_validate(sample_id, preprocess_response):
@@ -1253,10 +997,7 @@ def tensorleap_gt_encoder(name: str):
  leap_binder.set_ground_truth(inner_without_validate, name)


- def inner(*args, **kwargs):
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
- func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
- sample_id, preprocess_response = args
+ def inner(sample_id, preprocess_response):
  _validate_input_args(sample_id, preprocess_response)

  result = inner_without_validate(sample_id, preprocess_response)
@@ -1264,7 +1005,6 @@ def tensorleap_gt_encoder(name: str):
  _validate_result(result)

  if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
- batch_warning(result, user_function.__name__)
  result = np.expand_dims(result, axis=0)
  # Emit integration test event once per test
  try:
@@ -1273,13 +1013,12 @@ def tensorleap_gt_encoder(name: str):
  })
  except Exception as e:
  logger.debug(f"Failed to emit gt_encoder integration test event: {e}")
- if not _call_from_tl_platform:
- update_env_params_func("tensorleap_gt_encoder", "v")
+
  return result

  inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)

- def mapping_inner(*args, **kwargs):
+ def mapping_inner(sample_id, preprocess_response):
  class TempMapping:
  pass

@@ -1290,11 +1029,11 @@ def tensorleap_gt_encoder(name: str):

  mapping_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)

- def final_inner(*args, **kwargs):
+ def final_inner(sample_id, preprocess_response):
  if os.environ.get(mapping_runtime_mode_env_var_mame):
- return mapping_inner(*args, **kwargs)
+ return mapping_inner(sample_id, preprocess_response)
  else:
- return inner(*args, **kwargs)
+ return inner(sample_id, preprocess_response)

  final_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)
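The matching ground-truth encoder: same signature, unbatched np.float32 output. load_label and the two-class one-hot encoding are illustrative assumptions.

    import numpy as np
    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_gt_encoder

    @tensorleap_gt_encoder('classes')
    def gt_encoder(sample_id: str, preprocess_response: PreprocessResponse) -> np.ndarray:
        label = load_label(sample_id)                    # hypothetical helper
        return np.eye(2, dtype=np.float32)[label]        # one-hot ground truth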

@@ -1315,37 +1054,28 @@ def tensorleap_custom_loss(name: str, connects_to=None):
  valid_types = (np.ndarray, SamplePreprocessResponse)

  def _validate_input_args(*args, **kwargs):
- assert len(args) + len(kwargs) > 0, (
- f"{user_function.__name__}() validation failed: "
- f"Expected at least one positional|key-word argument of the allowed types (np.ndarray|SamplePreprocessResponse|list(np.ndarray|SamplePreprocessResponse)). "
- f"but received none. "
- f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
- )
  for i, arg in enumerate(args):
  if isinstance(arg, list):
  for y, elem in enumerate(arg):
- assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
+ assert isinstance(elem, valid_types), (f'tensorleap_custom_loss validation failed: '
  f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
  else:
- assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
+ assert isinstance(arg, valid_types), (f'tensorleap_custom_loss validation failed: '
  f'Argument #{i} should be a numpy array. Got {type(arg)}.')
  for _arg_name, arg in kwargs.items():
  if isinstance(arg, list):
  for y, elem in enumerate(arg):
- assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
+ assert isinstance(elem, valid_types), (f'tensorleap_custom_loss validation failed: '
  f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
  else:
- assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
+ assert isinstance(arg, valid_types), (f'tensorleap_custom_loss validation failed: '
  f'Argument #{_arg_name} should be a numpy array. Got {type(arg)}.')

  def _validate_result(result):
- validate_output_structure(result, func_name=user_function.__name__,
- expected_type_name="np.ndarray")
  assert isinstance(result, np.ndarray), \
- (f'{user_function.__name__} validation failed: '
+ (f'tensorleap_custom_loss validation failed: '
  f'The return type should be a numpy array. Got {type(result)}.')
- assert result.ndim<2 ,(f'{user_function.__name__} validation failed: '
- f'The return type should be a 1Dim numpy array but got {result.ndim}Dim.')
+

  @functools.wraps(user_function)
  def inner_without_validate(*args, **kwargs):
@@ -1376,9 +1106,6 @@ def tensorleap_custom_loss(name: str, connects_to=None):
  result = inner_without_validate(*args, **kwargs)

  _validate_result(result)
- if not _call_from_tl_platform:
- update_env_params_func("tensorleap_custom_loss", "v")
-
  return result

  def mapping_inner(*args, **kwargs):
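A per-sample loss consistent with the checks above: numpy arrays in, a numpy array out (the 1-D-only restriction on the removed lines is dropped in 1.0.144). The mean-squared-error body and argument names are illustrative.

    import numpy as np
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_loss

    @tensorleap_custom_loss('mse_loss')
    def mse_loss(prediction: np.ndarray, gt: np.ndarray) -> np.ndarray:
        # mean squared error per sample, reduced over all non-batch axes
        axes = tuple(range(1, prediction.ndim))
        return np.mean((prediction - gt) ** 2, axis=axes).astype(np.float32)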
@@ -1435,93 +1162,3 @@ def tensorleap_custom_layer(name: str):
  return custom_layer

  return decorating_function
-
-
- def tensorleap_status_table():
- '''
- Usage example:
- ###################
- leap_integration.py
- ###################
- from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_status_table
- ...
- ...
- ...
- if __name__ == '__main__':
- tensorleap_status_table()
- ...
- '''
- import atexit
- import sys
- import traceback
- CHECK = "✅"
- CROSS = "❌"
-
- table = [
- {"name": "tensorleap_preprocess", "Added to integration": CROSS},
- {"name": "tensorleap_integration_test", "Added to integration": CROSS},
- {"name": "tensorleap_input_encoder", "Added to integration": CROSS},
- {"name": "tensorleap_gt_encoder", "Added to integration": CROSS},
- {"name": "tensorleap_load_model", "Added to integration": CROSS},
- {"name": "tensorleap_custom_loss", "Added to integration": CROSS},
- {"name": "tensorleap_custom_metric (optional)", "Added to integration": CROSS},
- {"name": "tensorleap_metadata (optional)", "Added to integration": CROSS},
- {"name": "tensorleap_custom_visualizer (optional)", "Added to integration": CROSS},
-
- ]
-
- _finalizer_called = {"done": False}
-
- def _remove_suffix(s: str, suffix: str) -> str:
- #This is needed because str.remove_suffix was presented in python3.9+
- if suffix and s.endswith(suffix):
- return s[:-len(suffix)]
- return s
-
- def _print_table():
- ready_mess = "\nAll parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system."
- not_ready_mess = "\nSome mandatory components have not yet been added to the Integration test. Recommended next interface to add is: "
- mandatory_ready_mess = "\nAll mandatory parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system or continue to the next optional reccomeded interface,adding: "
-
- name_width = max(len(row["name"]) for row in table)
- status_width = max(len(row["Added to integration"]) for row in table)
- header = f"{'Decorator Name'.ljust(name_width)} | {'Added to integration'.ljust(status_width)}"
- sep = "-" * len(header)
- print("\n" + header)
- print(sep)
- ready=True
- for row in table:
- print(f"{row['name'].ljust(name_width)} | {row['Added to integration'].ljust(status_width)}")
- if row['Added to integration']==CROSS and ready:
- ready=False
- next_step=row['name']
-
-
- print(ready_mess) if ready else print(mandatory_ready_mess+next_step) if "optional" in next_step else print(not_ready_mess+next_step)
- def update_env_params(name: str, status: str = "✓"):
- for row in table:
- if _remove_suffix(row["name"]," (optional)") == name:
- row["Added to integration"] = CHECK if status=="v" else CROSS
- break
- def run_on_exit():
- if _finalizer_called["done"]:
- return
- _finalizer_called["done"] = True
- _print_table()
- def handle_exception(exc_type, exc_value, exc_traceback):
- traceback.print_exception(exc_type, exc_value, exc_traceback)
- run_on_exit()
- atexit.register(run_on_exit)
- sys.excepthook = handle_exception
- return update_env_params
-
-
- if not _call_from_tl_platform:
- update_env_params_func = tensorleap_status_table()
-
-
-
-
-
-
-
{code_loader-1.0.143.dev3 → code_loader-1.0.144}/pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "code-loader"
- version = "1.0.143.dev3"
+ version = "1.0.144"
  description = ""
  authors = ["dorhar <doron.harnoy@tensorleap.ai>"]
  license = "MIT"