code-loader 1.0.139.dev4__tar.gz → 1.0.139.dev6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of code-loader might be problematic.

Files changed (36)
  1. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/PKG-INFO +3 -4
  2. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/contract/datasetclasses.py +9 -1
  3. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/inner_leap_binder/leapbinder_decorators.py +465 -117
  4. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/mixpanel_tracker.py +0 -41
  5. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/pyproject.toml +1 -1
  6. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/LICENSE +0 -0
  7. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/README.md +0 -0
  8. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/__init__.py +0 -0
  9. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/contract/__init__.py +0 -0
  10. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/contract/enums.py +0 -0
  11. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/contract/exceptions.py +0 -0
  12. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/contract/mapping.py +0 -0
  13. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/contract/responsedataclasses.py +0 -0
  14. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/contract/visualizer_classes.py +0 -0
  15. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/default_losses.py +0 -0
  16. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/default_metrics.py +0 -0
  17. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/__init__.py +0 -0
  18. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/api.py +0 -0
  19. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/cli_config_utils.py +0 -0
  20. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/client.py +0 -0
  21. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/epoch.py +0 -0
  22. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/experiment.py +0 -0
  23. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/experiment_context.py +0 -0
  24. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/types.py +0 -0
  25. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/utils.py +0 -0
  26. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/experiment_api/workingspace_config_utils.py +0 -0
  27. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/inner_leap_binder/__init__.py +0 -0
  28. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/inner_leap_binder/leapbinder.py +0 -0
  29. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/leaploader.py +0 -0
  30. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/leaploaderbase.py +0 -0
  31. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/plot_functions/__init__.py +0 -0
  32. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/plot_functions/plot_functions.py +0 -0
  33. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/plot_functions/visualize.py +0 -0
  34. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/utils.py +0 -0
  35. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/visualizers/__init__.py +0 -0
  36. {code_loader-1.0.139.dev4 → code_loader-1.0.139.dev6}/code_loader/visualizers/default_visualizers.py +0 -0
--- code_loader-1.0.139.dev4/PKG-INFO
+++ code_loader-1.0.139.dev6/PKG-INFO
@@ -1,9 +1,9 @@
-Metadata-Version: 2.4
+Metadata-Version: 2.1
 Name: code-loader
-Version: 1.0.139.dev4
+Version: 1.0.139.dev6
 Summary:
+Home-page: https://github.com/tensorleap/code-loader
 License: MIT
-License-File: LICENSE
 Author: dorhar
 Author-email: doron.harnoy@tensorleap.ai
 Requires-Python: >=3.8,<3.13
@@ -20,7 +20,6 @@ Requires-Dist: numpy (>=2.3.2,<3.0.0) ; python_version >= "3.11" and python_vers
 Requires-Dist: psutil (>=5.9.5,<6.0.0)
 Requires-Dist: pyyaml (>=6.0.2,<7.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
-Project-URL: Homepage, https://github.com/tensorleap/code-loader
 Project-URL: Repository, https://github.com/tensorleap/code-loader
 Description-Content-Type: text/markdown
 
--- code_loader-1.0.139.dev4/code_loader/contract/datasetclasses.py
+++ code_loader-1.0.139.dev6/code_loader/contract/datasetclasses.py
@@ -1,3 +1,4 @@
+import warnings
 from dataclasses import dataclass, field
 from typing import Any, Callable, List, Optional, Dict, Union, Type
 import re
@@ -56,7 +57,14 @@ class PreprocessResponse:
             for sample_id in self.sample_ids:
                 assert isinstance(sample_id, str), f"Sample id should be of type str. Got: {type(sample_id)}"
         else:
-            raise Exception("length is deprecated.")
+            raise Exception("length is deprecated, please use sample_ids instead.")
+
+        if self.state is None:
+            warnings.warn(
+                "PreprocessResponse.state is not set. For best practice, assign a unique `state` value to each PreprocessResponse instance."
+            )
+        else:
+            assert isinstance(self.state, DataStateType), f"PreprocessResponse.state must be of type {DataStateType.__name__} but got {type(self.state)}"
 
     def __hash__(self) -> int:
        return id(self)
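
The warning added above means each PreprocessResponse should now carry an explicit `state`. A minimal sketch of a response that satisfies the new check (the import paths and the DataStateType member name are assumptions inferred from the attributes referenced in this hunk, not shown in the diff itself):

    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.contract.enums import DataStateType  # assumed location of DataStateType

    # Assumption: the dataclass accepts sample_ids and state keyword arguments.
    train_response = PreprocessResponse(sample_ids=["0", "1", "2"], state=DataStateType.training)
    # Leaving state unset now emits the UserWarning introduced in 1.0.139.dev6.
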
--- code_loader-1.0.139.dev4/code_loader/inner_leap_binder/leapbinder_decorators.py
+++ code_loader-1.0.139.dev6/code_loader/inner_leap_binder/leapbinder_decorators.py
@@ -1,9 +1,10 @@
 # mypy: ignore-errors
 import os
+import warnings
 from collections import defaultdict
 from functools import lru_cache
 from pathlib import Path
-from typing import Optional, Union, Callable, List, Dict, Set, Any
+from typing import Optional, Union, Callable, List, Dict, get_args, get_origin
 
 import numpy as np
 import numpy.typing as npt
@@ -25,9 +26,116 @@ import functools
 
 _called_from_inside_tl_decorator = 0
 _called_from_inside_tl_integration_test_decorator = False
-_integration_events_emitted: Set[str] = set()
-
-
+_update_env_status = None
+
+
+
+
+def validate_args_structure(*args, types_order, func_name, expected_names, **kwargs):
+    def _type_to_str(t):
+        origin = get_origin(t)
+        if origin is Union:
+            return " | ".join(tt.__name__ for tt in get_args(t))
+        elif hasattr(t, "__name__"):
+            return t.__name__
+        else:
+            return str(t)
+
+    def _format_types(types, names=None):
+        return ", ".join(
+            f"{(names[i] + ': ') if names else f'arg{i}: '}{_type_to_str(ty)}"
+            for i, ty in enumerate(types)
+        )
+
+    if expected_names:
+        normalized_args = []
+        for i, name in enumerate(expected_names):
+            if i < len(args):
+                normalized_args.append(args[i])
+            elif name in kwargs:
+                normalized_args.append(kwargs[name])
+            else:
+                raise AssertionError(
+                    f"{func_name} validation failed: "
+                    f"Missing required argument '{name}'. "
+                    f"Expected arguments: {expected_names}."
+                )
+    else:
+        normalized_args = list(args)
+    if len(normalized_args) != len(types_order):
+        expected = _format_types(types_order, expected_names)
+        got_types = ", ".join(type(arg).__name__ for arg in normalized_args)
+        raise AssertionError(
+            f"{func_name} validation failed: "
+            f"Expected exactly {len(types_order)} arguments ({expected}), "
+            f"but got {len(normalized_args)} argument(s) of type(s): ({got_types}). "
+            f"Correct usage example: {func_name}({expected})"
+        )
+
+    for i, (arg, expected_type) in enumerate(zip(normalized_args, types_order)):
+        origin = get_origin(expected_type)
+        if origin is Union:
+            allowed_types = get_args(expected_type)
+        else:
+            allowed_types = (expected_type,)
+
+        if not isinstance(arg, allowed_types):
+            allowed_str = " | ".join(t.__name__ for t in allowed_types)
+            raise AssertionError(
+                f"{func_name} validation failed: "
+                f"Argument '{expected_names[i] if expected_names else f'arg{i}'}' "
+                f"expected type {allowed_str}, but got {type(arg).__name__}. "
+                f"Correct usage example: {func_name}({_format_types(types_order, expected_names)})"
+            )
+
+
+def validate_output_structure(result, func_name: str, expected_type_name="np.ndarray",gt_flag=False):
+    if result is None or (isinstance(result, float) and np.isnan(result)):
+        if gt_flag:
+            raise AssertionError(
+                f"{func_name} validation failed: "
+                f"The function returned {result!r}. "
+                f"If you are working with an unlabeled dataset and no ground truth is available, "
+                f"use 'return np.array([], dtype=np.float32)' instead. "
+                f"Otherwise, {func_name} expected a single {expected_type_name} object. "
+                f"Make sure the function ends with 'return <{expected_type_name}>'."
+            )
+
+        raise AssertionError(
+            f"{func_name} validation failed: "
+            f"The function returned None. "
+            f"Expected a single {expected_type_name} object. "
+            f"Make sure the function ends with 'return <{expected_type_name}>'."
+        )
+    if isinstance(result, tuple):
+        element_descriptions = [
+            f"[{i}] type: {type(r).__name__}"
+            for i, r in enumerate(result)
+        ]
+        element_summary = "\n ".join(element_descriptions)
+
+        raise AssertionError(
+            f"{func_name} validation failed: "
+            f"The function returned multiple outputs ({len(result)} values), "
+            f"but only a single {expected_type_name} is allowed.\n\n"
+            f"Returned elements:\n"
+            f" {element_summary}\n\n"
+            f"Correct usage example:\n"
+            f" def {func_name}(...):\n"
+            f"     return <{expected_type_name}>\n\n"
+            f"If you intended to return multiple values, combine them into a single "
+            f"{expected_type_name} (e.g., by concatenation or stacking)."
+        )
+
+def batch_warning(result, func_name):
+    if result.shape[0] == 1:
+        warnings.warn(
+            f"{func_name} warning: Tensorleap will add a batch dimension at axis 0 to the output of {func_name}, "
+            f"although the detected size of axis 0 is already 1. "
+            f"This may lead to an extra batch dimension (e.g., shape (1, 1, ...)). "
+            f"Please ensure that the output of '{func_name}' is not already batched "
+            f"to avoid computation errors."
+        )
 def _add_mapping_connection(user_unique_name, connection_destinations, arg_names, name, node_mapping_type):
     connection_destinations = [connection_destination for connection_destination in connection_destinations
                                if not isinstance(connection_destination, SamplePreprocessResponse)]
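
Read on its own, the new `validate_args_structure` helper normalizes positional and keyword arguments against an expected name/type list and raises AssertionError with a usage hint on mismatch. A small self-contained sketch of its behaviour (the `greet` function is purely illustrative; the helper itself is module-level in this file, so the import below should work against 1.0.139.dev6):

    from typing import Union

    from code_loader.inner_leap_binder.leapbinder_decorators import validate_args_structure

    def greet(name, times):
        # Mirrors how the decorators below call the helper: argument names plus allowed types.
        validate_args_structure(name, times,
                                types_order=[str, Union[int, float]],
                                func_name="greet", expected_names=["name", "times"])
        return name * int(times)

    greet("hi", 2)         # passes validation
    try:
        greet(3, "twice")  # wrong types: raises AssertionError with a usage example
    except AssertionError as err:
        print(err)
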
@@ -50,12 +158,24 @@ def tensorleap_integration_test():
     def decorating_function(integration_test_function: Callable):
         leap_binder.integration_test_func = integration_test_function
 
+        def _validate_input_args(*args, **kwargs):
+            sample_id,preprocess_response=args
+            assert type(sample_id) == preprocess_response.sample_id_type, (
+                f"tensorleap_integration_test validation failed: "
+                f"sample_id type ({type(sample_id).__name__}) does not match the expected "
+                f"type ({preprocess_response.sample_id_type}) from the PreprocessResponse."
+            )
+
         def inner(*args, **kwargs):
+            validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
+                                    func_name='integration_test',expected_names=["idx", "preprocess"],**kwargs)
+            _validate_input_args(*args, **kwargs)
+
             global _called_from_inside_tl_integration_test_decorator
-            global _integration_events_emitted
-            _integration_events_emitted.clear()  # Clear events for new test
             try:
                 _called_from_inside_tl_integration_test_decorator = True
+                if not _update_env_status is None:
+                    _update_env_status("tensorleap_integration_test", "v")  # put here because otherwise it will become v only if it finishes all the script
                 ret = integration_test_function(*args, **kwargs)
 
                 try:
@@ -68,7 +188,7 @@ def tensorleap_integration_test():
                 line_number = first_tb.lineno
                 if isinstance(e, TypeError) and 'is not subscriptable' in str(e):
                     print(f'Invalid integration code. File {file_name}, line {line_number}: '
-                          f'Please remove this indexing operation usage from the integration test code.')
+                          f"indexing is supported only on the model's predictions inside the integration test. Please remove this indexing operation usage from the integration test code.")
                 else:
                     print(f'Invalid integration code. File {file_name}, line {line_number}: '
                           f'Integration test is only allowed to call Tensorleap decorators. '
@@ -80,9 +200,9 @@ def tensorleap_integration_test():
             _called_from_inside_tl_integration_test_decorator = False
 
         leap_binder.check()
-
         return inner
 
+
     return decorating_function
 
 def _safe_get_item(key):
@@ -91,31 +211,59 @@ def _safe_get_item(key):
     except ValueError:
         raise Exception(f'Tensorleap currently supports models with no more then 10 inputs')
 
-def _emit_integration_event_once(event_name: str, props: Dict[str, Any]) -> None:
-    """Emit an integration test event only once per test run."""
-    if event_name in _integration_events_emitted:
-        return
-
-    try:
-        from code_loader.mixpanel_tracker import track_integration_test_event
-        track_integration_test_event(event_name, props)
-        _integration_events_emitted.add(event_name)
-    except Exception:
-        pass
-
 def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]] = []):
+    assert isinstance(prediction_types, list),(
+        f"tensorleap_load_model validation failed: "
+        f" prediction_types is an optional argument of type List[PredictionTypeHandler]] but got {type(prediction_types).__name__}."
+    )
     for i, prediction_type in enumerate(prediction_types):
+        assert isinstance(prediction_type, PredictionTypeHandler),(f"tensorleap_load_model validation failed: "
+                                                                   f" prediction_types at position {i} must be of type PredictionTypeHandler but got {type(prediction_types[i]).__name__}.")
         leap_binder.add_prediction(prediction_type.name, prediction_type.labels, prediction_type.channel_dim, i)
 
+    def _validate_result(result) -> None:
+        valid_types=["onnxruntime","keras"]
+        err_message=f"tensorleap_load_model validation failed:\nSupported models are Keras and onnxruntime only and non of them was returned."
+        validate_output_structure(result, func_name="tensorleap_load_model", expected_type_name= [" | ".join(t for t in valid_types)][0])
+        try:
+            import keras
+        except ImportError:
+            keras = None
+        try:
+            import tensorflow as tf
+        except ImportError:
+            tf = None
+        try:
+            import onnxruntime
+        except ImportError:
+            onnxruntime = None
+
+        if not keras and not onnxruntime:
+            raise AssertionError(err_message)
+
+        is_keras_model = (
+            bool(keras and isinstance(result, getattr(keras, "Model", tuple())))
+            or bool(tf and isinstance(result, getattr(tf.keras, "Model", tuple())))
+        )
+        is_onnx_model = bool(onnxruntime and isinstance(result, onnxruntime.InferenceSession))
+
+        if not any([is_keras_model, is_onnx_model]):
+            raise AssertionError( err_message)
+
+
+
    def decorating_function(load_model_func):
         class TempMapping:
             pass
 
         @lru_cache()
-        def inner():
+        def inner(*args, **kwargs):
+            validate_args_structure(*args, types_order=[],
+                                    func_name='tensorleap_load_model',expected_names=[],**kwargs)
             class ModelPlaceholder:
                 def __init__(self):
-                    self.model = load_model_func()
+                    self.model = load_model_func()  #TODO- check why this fails on onnx model
+                    _validate_result(self.model)
 
                 # keras interface
                 def __call__(self, arg):
@@ -179,8 +327,10 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]
 
                 def get_inputs(self):
                     return self.model.get_inputs()
-
-            return ModelPlaceholder()
+            model_placeholder=ModelPlaceholder()
+            if not _update_env_status is None:
+                _update_env_status("tensorleap_load_model", "v")
+            return model_placeholder
 
         def mapping_inner():
             class ModelOutputPlaceholder:
@@ -243,12 +393,11 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]
 
             return ModelPlaceholder()
 
-        def final_inner():
+        def final_inner(*args, **kwargs):
             if os.environ.get(mapping_runtime_mode_env_var_mame):
                 return mapping_inner()
             else:
-                return inner()
-
+                return inner(*args, **kwargs)
         return final_inner
 
     return decorating_function
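
With `_validate_result` in place, the function wrapped by `tensorleap_load_model` must return either a Keras Model or an `onnxruntime.InferenceSession`; anything else now raises an AssertionError. A hedged sketch of a conforming loader (the model path is a placeholder, and passing an empty `prediction_types` list is assumed to be acceptable since it is the declared default):

    import onnxruntime
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_load_model

    @tensorleap_load_model(prediction_types=[])
    def load_model():
        # Returning an InferenceSession (or a keras.Model) satisfies the new check.
        return onnxruntime.InferenceSession("model.onnx")  # hypothetical model file
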
@@ -259,81 +408,168 @@ def tensorleap_custom_metric(name: str,
                              compute_insights: Optional[Union[bool, Dict[str, bool]]] = None,
                              connects_to=None):
     name_to_unique_name = defaultdict(set)
-
     def decorating_function(
             user_function: Union[CustomCallableInterfaceMultiArgs, CustomMultipleReturnCallableInterfaceMultiArgs,
                                  ConfusionMatrixCallableInterfaceMultiArgs]):
+
+        def _validate_decorators_signature():
+            err_message = f"{user_function.__name__} validation failed.\n"
+            if not isinstance(name, str):
+                raise TypeError(err_message + f"`name` must be a string, got type {type(name).__name__}.")
+            valid_directions = {MetricDirection.Upward, MetricDirection.Downward}
+            if isinstance(direction, MetricDirection):
+                if direction not in valid_directions:
+                    raise ValueError(
+                        err_message +
+                        f"Invalid MetricDirection: {direction}. Must be one of {valid_directions}, "
+                        f"got type {type(direction).__name__}."
+                    )
+            elif isinstance(direction, dict):
+                if not all(isinstance(k, str) for k in direction.keys()):
+                    invalid_keys = {k: type(k).__name__ for k in direction.keys() if not isinstance(k, str)}
+                    raise TypeError(
+                        err_message +
+                        f"All keys in `direction` must be strings, got invalid key types: {invalid_keys}."
+                    )
+                for k, v in direction.items():
+                    if v not in valid_directions:
+                        raise ValueError(
+                            err_message +
+                            f"Invalid direction for key '{k}': {v}. Must be one of {valid_directions}, "
+                            f"got type {type(v).__name__}."
+                        )
+            else:
+                raise TypeError(
+                    err_message +
+                    f"`direction` must be a MetricDirection or a Dict[str, MetricDirection], "
+                    f"got type {type(direction).__name__}."
+                )
+            if compute_insights is not None:
+                if not isinstance(compute_insights, (bool, dict)):
+                    raise TypeError(
+                        err_message +
+                        f"`compute_insights` must be a bool or a Dict[str, bool], "
+                        f"got type {type(compute_insights).__name__}."
+                    )
+                if isinstance(compute_insights, dict):
+                    if not all(isinstance(k, str) for k in compute_insights.keys()):
+                        invalid_keys = {k: type(k).__name__ for k in compute_insights.keys() if not isinstance(k, str)}
+                        raise TypeError(
+                            err_message +
+                            f"All keys in `compute_insights` must be strings, got invalid key types: {invalid_keys}."
+                        )
+                    for k, v in compute_insights.items():
+                        if not isinstance(v, bool):
+                            raise TypeError(
+                                err_message +
+                                f"Invalid type for compute_insights['{k}']: expected bool, got type {type(v).__name__}."
+                            )
+            if connects_to is not None:
+                valid_types = (str, list, tuple, set)
+                if not isinstance(connects_to, valid_types):
+                    raise TypeError(
+                        err_message +
+                        f"`connects_to` must be one of {valid_types}, got type {type(connects_to).__name__}."
+                    )
+                if isinstance(connects_to, (list, tuple, set)):
+                    invalid_elems = [f"{type(e).__name__}" for e in connects_to if not isinstance(e, str)]
+                    if invalid_elems:
+                        raise TypeError(
+                            err_message +
+                            f"All elements in `connects_to` must be strings, "
+                            f"but found element types: {invalid_elems}."
+                        )
+
+
+        _validate_decorators_signature()
+
         for metric_handler in leap_binder.setup_container.metrics:
             if metric_handler.metric_handler_data.name == name:
                 raise Exception(f'Metric with name {name} already exists. '
                                 f'Please choose another')
 
         def _validate_input_args(*args, **kwargs) -> None:
+            assert len(args) > 0, (
+                f"{user_function.__name__}() validation failed: "
+                f"Expected at least one positional|key-word argument of type np.ndarray, "
+                f"but received none. "
+                f"Correct usage example: tensorleap_custom_metric(input_array: np.ndarray, ...)"
+            )
             for i, arg in enumerate(args):
                 assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
-                    f'tensorleap_custom_metric validation failed: '
+                    f'{user_function.__name__}() validation failed: '
                     f'Argument #{i} should be a numpy array. Got {type(arg)}.')
                 if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
                     assert arg.shape[0] == leap_binder.batch_size_to_validate, \
-                        (f'tensorleap_custom_metric validation failed: Argument #{i} '
+                        (f'{user_function.__name__}() validation failed: Argument #{i} '
                          f'first dim should be as the batch size. Got {arg.shape[0]} '
                         f'instead of {leap_binder.batch_size_to_validate}')
 
             for _arg_name, arg in kwargs.items():
                 assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
-                    f'tensorleap_custom_metric validation failed: '
+                    f'{user_function.__name__}() validation failed: '
                     f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
                 if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
                     assert arg.shape[0] == leap_binder.batch_size_to_validate, \
-                        (f'tensorleap_custom_metric validation failed: Argument {_arg_name} '
+                        (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
                          f'first dim should be as the batch size. Got {arg.shape[0]} '
                         f'instead of {leap_binder.batch_size_to_validate}')
 
         def _validate_result(result) -> None:
-            supported_types_message = (f'tensorleap_custom_metric validation failed: '
-                                       f'Metric has returned unsupported type. Supported types are List[float], '
-                                       f'List[List[ConfusionMatrixElement]], NDArray[np.float32]. ')
+            validate_output_structure(result, func_name=user_function.__name__,
+                                      expected_type_name="List[float | int | None | List[ConfusionMatrixElement] ] | NDArray[np.float32] or dictonary with one of these types as its values types")
+            supported_types_message = (f'{user_function.__name__}() validation failed: '
+                                       f'{user_function.__name__}() has returned unsupported type.\nSupported types are List[float|int|None], '
+                                       f'List[List[ConfusionMatrixElement]], NDArray[np.float32] or dictonary with one of these types as its values types. ')
 
-            def _validate_single_metric(single_metric_result):
+            def _validate_single_metric(single_metric_result,key=None):
                 if isinstance(single_metric_result, list):
                     if isinstance(single_metric_result[0], list):
-                        assert isinstance(single_metric_result[0][0], ConfusionMatrixElement), \
-                            f'{supported_types_message}Got List[List[{type(single_metric_result[0][0])}]].'
+                        assert all(isinstance(cm, ConfusionMatrixElement) for cm in single_metric_result[0]), (
+                            f"{supported_types_message} "
+                            f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
+                            f"List[List[{', '.join(type(cm).__name__ for cm in single_metric_result[0])}]]."
+                        )
+
                     else:
-                        assert isinstance(single_metric_result[0], (
-                            float, int,
-                            type(None))), f'{supported_types_message}Got List[{type(single_metric_result[0])}].'
+                        assert all(isinstance(v, (float,int,type(None),np.float32)) for v in single_metric_result), (
+                            f"{supported_types_message}\n"
+                            f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
+                            f"List[{', '.join(type(v).__name__ for v in single_metric_result)}]."
+                        )
                 else:
                     assert isinstance(single_metric_result,
-                                      np.ndarray), f'{supported_types_message}Got {type(single_metric_result)}.'
-                    assert len(single_metric_result.shape) == 1, (f'tensorleap_custom_metric validation failed: '
+                                      np.ndarray), f'{supported_types_message}\nGot {type(single_metric_result)}.'
+                    assert len(single_metric_result.shape) == 1, (f'{user_function.__name__}() validation failed: '
                                                                   f'The return shape should be 1D. Got {len(single_metric_result.shape)}D.')
 
                 if leap_binder.batch_size_to_validate:
                     assert len(single_metric_result) == leap_binder.batch_size_to_validate, \
-                        f'tensorleap_custom_metrix validation failed: The return len should be as the batch size.'
+                        f'{user_function.__name__}() validation failed: The return len {f"of srt{key} value" if key is not None else ""} should be as the batch size.'
 
             if isinstance(result, dict):
                 for key, value in result.items():
+                    _validate_single_metric(value,key)
+
                     assert isinstance(key, str), \
-                        (f'tensorleap_custom_metric validation failed: '
+                        (f'{user_function.__name__}() validation failed: '
                          f'Keys in the return dict should be of type str. Got {type(key)}.')
                     _validate_single_metric(value)
 
                 if isinstance(direction, dict):
                     for direction_key in direction:
                         assert direction_key in result, \
-                            (f'tensorleap_custom_metric validation failed: '
+                            (f'{user_function.__name__}() validation failed: '
                              f'Keys in the direction mapping should be part of result keys. Got key {direction_key}.')
 
                 if compute_insights is not None:
                     assert isinstance(compute_insights, dict), \
-                        (f'tensorleap_custom_metric validation failed: '
+                        (f'{user_function.__name__}() validation failed: '
                          f'compute_insights should be dict if using the dict results. Got {type(compute_insights)}.')
 
                     for ci_key in compute_insights:
                         assert ci_key in result, \
-                            (f'tensorleap_custom_metric validation failed: '
+                            (f'{user_function.__name__}() validation failed: '
                              f'Keys in the compute_insights mapping should be part of result keys. Got key {ci_key}.')
 
             else:
@@ -341,7 +577,7 @@ def tensorleap_custom_metric(name: str,
 
                 if compute_insights is not None:
                     assert isinstance(compute_insights, bool), \
-                        (f'tensorleap_custom_metric validation failed: '
+                        (f'{user_function.__name__}() validation failed: '
                          f'compute_insights should be boolean. Got {type(compute_insights)}.')
 
         @functools.wraps(user_function)
@@ -373,6 +609,8 @@ def tensorleap_custom_metric(name: str,
             result = inner_without_validate(*args, **kwargs)
 
             _validate_result(result)
+            if not _update_env_status is None:
+                _update_env_status("tensorleap_custom_metric","v")
            return result
 
         def mapping_inner(*args, **kwargs):
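
Under the widened result check, a custom metric may return per-sample lists of float/int/None, lists of ConfusionMatrixElement lists, a 1-D float32 array, or a dict mapping metric names to any of those. A sketch of a metric that returns one float32 value per sample (the metric logic is illustrative, and the remaining decorator parameters are assumed to keep their defaults):

    import numpy as np
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_metric

    @tensorleap_custom_metric(name="abs_error")
    def abs_error(prediction: np.ndarray, gt: np.ndarray):
        # Flatten everything except the batch axis and return a 1-D float32 array.
        per_sample = np.abs(prediction - gt).reshape(len(prediction), -1).mean(axis=1)
        return per_sample.astype(np.float32)
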
@@ -412,28 +650,38 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
     name_to_unique_name = defaultdict(set)
 
     def decorating_function(user_function: VisualizerCallableInterface):
+        assert isinstance(visualizer_type,LeapDataType),(f"{user_function.__name__} validation failed: "
+                                                         f"visualizer_type should be of type {LeapDataType.__name__} but got {type(visualizer_type)}"
+                                                         )
+
         for viz_handler in leap_binder.setup_container.visualizers:
             if viz_handler.visualizer_handler_data.name == name:
                 raise Exception(f'Visualizer with name {name} already exists. '
                                 f'Please choose another')
 
         def _validate_input_args(*args, **kwargs):
+            assert len(args) > 0, (
+                f"{user_function.__name__}() validation failed: "
+                f"Expected at least one positional|key-word argument of type np.ndarray, "
+                f"but received none. "
+                f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
+            )
             for i, arg in enumerate(args):
                 assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
-                    f'tensorleap_custom_visualizer validation failed: '
+                    f'{user_function.__name__}() validation failed: '
                     f'Argument #{i} should be a numpy array. Got {type(arg)}.')
                 if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
                     assert arg.shape[0] != leap_binder.batch_size_to_validate, \
-                        (f'tensorleap_custom_visualizer validation failed: '
+                        (f'{user_function.__name__}() validation failed: '
                          f'Argument #{i} should be without batch dimension. ')
 
             for _arg_name, arg in kwargs.items():
                 assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
-                    f'tensorleap_custom_visualizer validation failed: '
+                    f'{user_function.__name__}() validation failed: '
                     f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
                 if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
                     assert arg.shape[0] != leap_binder.batch_size_to_validate, \
-                        (f'tensorleap_custom_visualizer validation failed: Argument {_arg_name} '
+                        (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
                          f'should be without batch dimension. ')
 
         def _validate_result(result):
@@ -447,8 +695,11 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
                 LeapDataType.ImageWithBBox: LeapImageWithBBox,
                 LeapDataType.ImageWithHeatmap: LeapImageWithHeatmap
             }
+            validate_output_structure(result, func_name=user_function.__name__,
+                                      expected_type_name=result_type_map[visualizer_type])
+
             assert isinstance(result, result_type_map[visualizer_type]), \
-                (f'tensorleap_custom_visualizer validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'The return type should be {result_type_map[visualizer_type]}. Got {type(result)}.')
 
         @functools.wraps(user_function)
@@ -480,6 +731,8 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
             result = inner_without_validate(*args, **kwargs)
 
             _validate_result(result)
+            if not _update_env_status is None:
+                _update_env_status("tensorleap_custom_visualizer","v")
             return result
 
         def mapping_inner(*args, **kwargs):
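
A visualizer that passes both new checks (unbatched numpy inputs, return type matching `visualizer_type`) could look like the sketch below; `LeapImage` and `LeapDataType.Image` are assumed from the package's contract module, since only the BBox/Heatmap entries of `result_type_map` are visible in this hunk:

    import numpy as np
    from code_loader.contract.enums import LeapDataType  # assumed import path
    from code_loader.contract.visualizer_classes import LeapImage  # assumed import path
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_visualizer

    @tensorleap_custom_visualizer(name="input_image", visualizer_type=LeapDataType.Image)
    def input_image_visualizer(image: np.ndarray) -> LeapImage:
        # image arrives without a batch dimension, e.g. (H, W, C)
        return LeapImage((image * 255).astype(np.uint8))
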
@@ -521,30 +774,26 @@ def tensorleap_metadata(
                                 f'Please choose another')
 
         def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
-            assert isinstance(sample_id, (int, str)), \
-                (f'tensorleap_metadata validation failed: '
-                 f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
-            assert isinstance(preprocess_response, PreprocessResponse), \
-                (f'tensorleap_metadata validation failed: '
-                 f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
             assert type(sample_id) == preprocess_response.sample_id_type, \
-                (f'tensorleap_metadata validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'Argument sample_id should be as the same type as defined in the preprocess response '
                 f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')
 
         def _validate_result(result):
             supported_result_types = (type(None), int, str, bool, float, dict, np.floating,
                                       np.bool_, np.unsignedinteger, np.signedinteger, np.integer)
+            validate_output_structure(result, func_name=user_function.__name__,
+                                      expected_type_name=supported_result_types)
             assert isinstance(result, supported_result_types), \
-                (f'tensorleap_metadata validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'Unsupported return type. Got {type(result)}. should be any of {str(supported_result_types)}')
             if isinstance(result, dict):
                 for key, value in result.items():
                     assert isinstance(key, str), \
-                        (f'tensorleap_metadata validation failed: '
+                        (f'{user_function.__name__}() validation failed: '
                         f'Keys in the return dict should be of type str. Got {type(key)}.')
                     assert isinstance(value, supported_result_types), \
-                        (f'tensorleap_metadata validation failed: '
+                        (f'{user_function.__name__}() validation failed: '
                         f'Values in the return dict should be of type {str(supported_result_types)}. Got {type(value)}.')
 
         def inner_without_validate(sample_id, preprocess_response):
@@ -561,15 +810,19 @@ def tensorleap_metadata(
 
         leap_binder.set_metadata(inner_without_validate, name, metadata_type)
 
-        def inner(sample_id, preprocess_response):
+        def inner(*args,**kwargs):
             if os.environ.get(mapping_runtime_mode_env_var_mame):
                 return None
-
+            validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
+                                    func_name=user_function.__name__, expected_names=["idx", "preprocess"],**kwargs)
+            sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
             _validate_input_args(sample_id, preprocess_response)
 
             result = inner_without_validate(sample_id, preprocess_response)
 
             _validate_result(result)
+            if not _update_env_status is None:
+                _update_env_status("tensorleap_metadata","v")
             return result
 
        return inner
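
A metadata function under these checks still receives (sample_id, preprocess_response) and returns a scalar or a flat dict of scalars keyed by strings; a brief sketch (decorator arguments beyond `name` are assumed to keep their defaults):

    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_metadata

    @tensorleap_metadata(name="sample_stats")
    def sample_stats(idx: str, preprocess: PreprocessResponse):
        # Keys must be str; values must be scalars (None/int/float/bool/str or numpy scalars).
        return {"id_length": len(idx), "has_prefix": idx.startswith("sample_")}
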
@@ -632,19 +885,23 @@ def tensorleap_preprocess():
 
         def _validate_input_args(*args, **kwargs):
             assert len(args) == 0 and len(kwargs) == 0, \
-                (f'tensorleap_preprocess validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'The function should not take any arguments. Got {args} and {kwargs}.')
 
         def _validate_result(result):
-            assert isinstance(result, list), \
-                (f'tensorleap_preprocess validation failed: '
-                 f'The return type should be a list. Got {type(result)}.')
+            assert isinstance(result, list), (
+                f"{user_function.__name__}() validation failed: expected return type list[{PreprocessResponse.__name__}]"
+                f"(e.g., [PreprocessResponse1, PreprocessResponse2, ...]), but returned type is {type(result).__name__}."
+                if not isinstance(result, tuple)
+                else f"{user_function.__name__}() validation failed: expected to return a single list[{PreprocessResponse.__name__}] object, "
+                     f"but returned {len(result)} objects instead."
+            )
             for i, response in enumerate(result):
                 assert isinstance(response, PreprocessResponse), \
-                    (f'tensorleap_preprocess validation failed: '
+                    (f'{user_function.__name__}() validation failed: '
                     f'Element #{i} in the return list should be a PreprocessResponse. Got {type(response)}.')
             assert len(set(result)) == len(result), \
-                (f'tensorleap_preprocess validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'The return list should not contain duplicate PreprocessResponse objects.')
 
         def inner(*args, **kwargs):
@@ -652,9 +909,10 @@ def tensorleap_preprocess():
                 return [None, None, None, None]
 
             _validate_input_args(*args, **kwargs)
-
             result = user_function()
             _validate_result(result)
+            if not _update_env_status is None:
+                _update_env_status("tensorleap_preprocess", "v")
             return result
 
        return inner
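
Tying this back to the PreprocessResponse change above, a preprocess function returns a plain list (not a tuple) of distinct responses, one per split, each with its own state (a sketch; the DataStateType member names and import path are assumptions):

    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.contract.enums import DataStateType  # assumed import path
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_preprocess

    @tensorleap_preprocess()
    def preprocess_func():
        train = PreprocessResponse(sample_ids=[str(i) for i in range(80)], state=DataStateType.training)
        val = PreprocessResponse(sample_ids=[str(i) for i in range(80, 100)], state=DataStateType.validation)
        return [train, val]
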
@@ -853,29 +1111,23 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
         raise Exception(f"Channel dim for input {name} is expected to be either -1 or positive")
 
         def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
-            assert isinstance(sample_id, (int, str)), \
-                (f'tensorleap_input_encoder validation failed: '
-                 f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
-            assert isinstance(preprocess_response, PreprocessResponse), \
-                (f'tensorleap_input_encoder validation failed: '
-                 f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
             assert type(sample_id) == preprocess_response.sample_id_type, \
-                (f'tensorleap_input_encoder validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'Argument sample_id should be as the same type as defined in the preprocess response '
                 f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')
 
         def _validate_result(result):
+            validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray")
             assert isinstance(result, np.ndarray), \
-                (f'tensorleap_input_encoder validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
             assert result.dtype == np.float32, \
-                (f'tensorleap_input_encoder validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'The return type should be a numpy array of type float32. Got {result.dtype}.')
-            assert channel_dim - 1 <= len(result.shape), (f'tensorleap_input_encoder validation failed: '
+            assert channel_dim - 1 <= len(result.shape), (f'{user_function.__name__}() validation failed: '
                                                           f'The channel_dim ({channel_dim}) should be <= to the rank of the resulting input rank ({len(result.shape)}).')
 
         def inner_without_validate(sample_id, preprocess_response):
-
             global _called_from_inside_tl_decorator
             _called_from_inside_tl_decorator += 1
 
@@ -889,7 +1141,10 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
         leap_binder.set_input(inner_without_validate, name, channel_dim=channel_dim)
 
 
-        def inner(sample_id, preprocess_response):
+        def inner(*args, **kwargs):
+            validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
+                                    func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
+            sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
             _validate_input_args(sample_id, preprocess_response)
 
             result = inner_without_validate(sample_id, preprocess_response)
@@ -897,24 +1152,20 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
             _validate_result(result)
 
             if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
+                batch_warning(result,user_function.__name__)
                 result = np.expand_dims(result, axis=0)
-                # Emit integration test event once per test
-                _emit_integration_event_once('input_encoder_integration_test', {
-                    'encoder_name': name,
-                    'channel_dim': channel_dim,
-                    'model_input_index': model_input_index
-                })
+            if not _update_env_status is None:
+                _update_env_status("tensorleap_input_encoder", "v")
 
             return result
 
 
-
         node_mapping_type = NodeMappingType.Input
         if model_input_index is not None:
             node_mapping_type = NodeMappingType(f'Input{str(model_input_index)}')
         inner.node_mapping = NodeMapping(name, node_mapping_type)
 
-        def mapping_inner(sample_id, preprocess_response):
+        def mapping_inner(*args, **kwargs):
             class TempMapping:
                 pass
 
@@ -926,11 +1177,11 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
 
         mapping_inner.node_mapping = NodeMapping(name, node_mapping_type)
 
-        def final_inner(sample_id, preprocess_response):
+        def final_inner(*args, **kwargs):
             if os.environ.get(mapping_runtime_mode_env_var_mame):
-                return mapping_inner(sample_id, preprocess_response)
+                return mapping_inner(*args, **kwargs)
             else:
-                return inner(sample_id, preprocess_response)
+                return inner(*args, **kwargs)
 
         final_inner.node_mapping = NodeMapping(name, node_mapping_type)
 
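
Because of the new `batch_warning`, an input encoder should return a single unbatched sample; the decorator adds the batch axis itself during the integration test. A sketch with placeholder data:

    import numpy as np
    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_input_encoder

    @tensorleap_input_encoder(name="image")
    def input_encoder(idx: str, preprocess: PreprocessResponse) -> np.ndarray:
        # One sample of shape (H, W, C), float32, with no leading batch axis:
        # a leading axis of size 1 would now trigger batch_warning.
        return np.zeros((32, 32, 3), dtype=np.float32)  # placeholder data
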
@@ -947,23 +1198,18 @@ def tensorleap_gt_encoder(name: str):
                                 f'Please choose another')
 
         def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
-            assert isinstance(sample_id, (int, str)), \
-                (f'tensorleap_gt_encoder validation failed: '
-                 f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
-            assert isinstance(preprocess_response, PreprocessResponse), \
-                (f'tensorleap_gt_encoder validation failed: '
-                 f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
             assert type(sample_id) == preprocess_response.sample_id_type, \
-                (f'tensorleap_gt_encoder validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'Argument sample_id should be as the same type as defined in the preprocess response '
                 f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')
 
         def _validate_result(result):
+            validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray",gt_flag=True)
             assert isinstance(result, np.ndarray), \
-                (f'tensorleap_gt_encoder validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
             assert result.dtype == np.float32, \
-                (f'tensorleap_gt_encoder validation failed: '
+                (f'{user_function.__name__}() validation failed: '
                 f'The return type should be a numpy array of type float32. Got {result.dtype}.')
 
         def inner_without_validate(sample_id, preprocess_response):
@@ -980,7 +1226,10 @@ def tensorleap_gt_encoder(name: str):
         leap_binder.set_ground_truth(inner_without_validate, name)
 
 
-        def inner(sample_id, preprocess_response):
+        def inner(*args, **kwargs):
+            validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
+                                    func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
+            sample_id, preprocess_response = args
             _validate_input_args(sample_id, preprocess_response)
 
             result = inner_without_validate(sample_id, preprocess_response)
@@ -988,13 +1237,15 @@ def tensorleap_gt_encoder(name: str):
             _validate_result(result)
 
             if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
+                batch_warning(result, user_function.__name__)
                 result = np.expand_dims(result, axis=0)
-
+            if not _update_env_status is None:
+                _update_env_status("tensorleap_gt_encoder", "v")
             return result
 
         inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)
 
-        def mapping_inner(sample_id, preprocess_response):
+        def mapping_inner(*args, **kwargs):
             class TempMapping:
                 pass
 
@@ -1005,11 +1256,11 @@ def tensorleap_gt_encoder(name: str):
 
         mapping_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)
 
-        def final_inner(sample_id, preprocess_response):
+        def final_inner(*args, **kwargs):
             if os.environ.get(mapping_runtime_mode_env_var_mame):
-                return mapping_inner(sample_id, preprocess_response)
+                return mapping_inner(*args, **kwargs)
             else:
-                return inner(sample_id, preprocess_response)
+                return inner(*args, **kwargs)
 
         final_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)
 
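
The `gt_flag=True` path of `validate_output_structure` spells out the unlabeled-data convention; a sketch of a ground-truth encoder that follows it (the label lookup is a placeholder):

    import numpy as np
    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_gt_encoder

    @tensorleap_gt_encoder(name="label")
    def gt_encoder(idx: str, preprocess: PreprocessResponse) -> np.ndarray:
        label = None  # placeholder: fetch the label for idx from your own data here
        if label is None:
            # Unlabeled sample: return an empty float32 array rather than None,
            # exactly as the new error message recommends.
            return np.array([], dtype=np.float32)
        return np.asarray(label, dtype=np.float32)
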
@@ -1030,28 +1281,37 @@ def tensorleap_custom_loss(name: str, connects_to=None):
     valid_types = (np.ndarray, SamplePreprocessResponse)
 
         def _validate_input_args(*args, **kwargs):
+            assert len(args) > 0 and len(kwargs)==0, (
+                f"{user_function.__name__}() validation failed: "
+                f"Expected at least one positional|key-word argument of the allowed types (np.ndarray|SamplePreprocessResponse|list(np.ndarray|SamplePreprocessResponse)). "
+                f"but received none. "
+                f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
+            )
             for i, arg in enumerate(args):
                 if isinstance(arg, list):
                     for y, elem in enumerate(arg):
-                        assert isinstance(elem, valid_types), (f'tensorleap_custom_loss validation failed: '
+                        assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
                                                                f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
                 else:
-                    assert isinstance(arg, valid_types), (f'tensorleap_custom_loss validation failed: '
+                    assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
                                                           f'Argument #{i} should be a numpy array. Got {type(arg)}.')
             for _arg_name, arg in kwargs.items():
                 if isinstance(arg, list):
                     for y, elem in enumerate(arg):
-                        assert isinstance(elem, valid_types), (f'tensorleap_custom_loss validation failed: '
+                        assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
                                                                f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
                 else:
-                    assert isinstance(arg, valid_types), (f'tensorleap_custom_loss validation failed: '
+                    assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
                                                           f'Argument #{_arg_name} should be a numpy array. Got {type(arg)}.')
 
         def _validate_result(result):
+            validate_output_structure(result, func_name=user_function.__name__,
+                                      expected_type_name="np.ndarray")
             assert isinstance(result, np.ndarray), \
-                (f'tensorleap_custom_loss validation failed: '
+                (f'{user_function.__name__} validation failed: '
                 f'The return type should be a numpy array. Got {type(result)}.')
-
+            assert result.ndim<2 ,(f'{user_function.__name__} validation failed: '
+                                   f'The return type should be a 1Dim numpy array but got {result.ndim}Dim.')
 
         @functools.wraps(user_function)
         def inner_without_validate(*args, **kwargs):
@@ -1082,6 +1342,9 @@ def tensorleap_custom_loss(name: str, connects_to=None):
             result = inner_without_validate(*args, **kwargs)
 
             _validate_result(result)
+            if not _update_env_status is None:
+                _update_env_status("tensorleap_custom_loss", "v")
+
             return result
 
         def mapping_inner(*args, **kwargs):
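
With the stricter result check (a numpy array of fewer than two dimensions), the loss is expected to be reduced to at most one value per sample. A sketch of a conforming loss:

    import numpy as np
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_loss

    @tensorleap_custom_loss(name="mse")
    def mse_loss(prediction: np.ndarray, gt: np.ndarray) -> np.ndarray:
        # Reduce every axis except the batch axis so the result is 1-D (ndim < 2).
        per_sample = np.mean((prediction - gt) ** 2, axis=tuple(range(1, prediction.ndim)))
        return per_sample.astype(np.float32)
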
@@ -1138,3 +1401,88 @@ def tensorleap_custom_layer(name: str):
         return custom_layer
 
     return decorating_function
+
+
+def tensorleap_status_table():
+    '''
+    Usage example:
+    ###################
+    leap_integration.py
+    ###################
+    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_status_table
+    ...
+    ...
+    ...
+    if __name__ == '__main__':
+        tensorleap_status_table()
+        ...
+    '''
+    import atexit
+    import sys
+    import traceback
+    CHECK = "✅"
+    CROSS = "❌"
+
+    table = [
+        {"name": "tensorleap_preprocess", "Added to integration": CROSS},
+        {"name": "tensorleap_integration_test", "Added to integration": CROSS},
+        {"name": "tensorleap_input_encoder", "Added to integration": CROSS},
+        {"name": "tensorleap_gt_encoder", "Added to integration": CROSS},
+        {"name": "tensorleap_load_model", "Added to integration": CROSS},
+        {"name": "tensorleap_custom_loss", "Added to integration": CROSS},
+        {"name": "tensorleap_custom_metric (optional)", "Added to integration": CROSS},
+        {"name": "tensorleap_metadata (optional)", "Added to integration": CROSS},
+        {"name": "tensorleap_custom_visualizer (optional)", "Added to integration": CROSS},
+
+    ]
+
+    _finalizer_called = {"done": False}
+
+    def _print_table():
+        ready_mess = "\nAll parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system."
+        not_ready_mess = "\nSome mandatory components have not yet been added to the Integration test. Recommended next interface to add is: "
+        mandatory_ready_mess = "\nAll mandatory parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system or continue to the next optional reccomeded interface,adding: "
+
+        name_width = max(len(row["name"]) for row in table)
+        status_width = max(len(row["Added to integration"]) for row in table)
+        header = f"{'Decorator Name'.ljust(name_width)} | {'Added to integration'.ljust(status_width)}"
+        sep = "-" * len(header)
+        print("\n" + header)
+        print(sep)
+        ready=True
+        for row in table:
+            print(f"{row['name'].ljust(name_width)} | {row['Added to integration'].ljust(status_width)}")
+            if row['Added to integration']==CROSS and ready:
+                ready=False
+                next_step=row['name']
+
+
+        print(ready_mess) if ready else print(mandatory_ready_mess+next_step) if "optional" in next_step else print(not_ready_mess+next_step)
+    def update_env_params(name: str, status: str = "✓"):
+        for row in table:
+            if row["name"].removesuffix(" (optional)") == name:
+                row["Added to integration"] = CHECK if status=="v" else CROSS
+                break
+    def run_on_exit():
+        if _finalizer_called["done"]:
+            return
+        _finalizer_called["done"] = True
+        _print_table()
+    def handle_exception(exc_type, exc_value, exc_traceback):
+        traceback.print_exception(exc_type, exc_value, exc_traceback)
+        run_on_exit()
+    atexit.register(run_on_exit)
+    sys.excepthook = handle_exception
+    global _update_env_status
+    _update_env_status = update_env_params
+    return update_env_params
+
+
+
+
+
+
+
+
+
+
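
Pulled together, `tensorleap_status_table` and the `_update_env_status` hook that the decorators now call give a progress report for an integration script. A sketch of how the pieces would be combined, following the docstring above (the decorated functions are the illustrative ones sketched earlier, not part of the package):

    # leap_integration.py (hypothetical layout)
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_status_table

    # ... decorated preprocess / encoders / load_model / loss / integration test here ...

    if __name__ == '__main__':
        tensorleap_status_table()  # registers the atexit and excepthook reporters
        # Run the integration test; every decorator that executes flips its row to a
        # check mark, and the table prints when the script exits (even on an exception).
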
--- code_loader-1.0.139.dev4/code_loader/mixpanel_tracker.py
+++ code_loader-1.0.139.dev6/code_loader/mixpanel_tracker.py
@@ -126,43 +126,6 @@ class MixpanelTracker:
         except Exception as e:
             pass
 
-    def track_integration_test_event(self, event_name: str, event_properties: Optional[Dict[str, Any]] = None) -> None:
-        """Track an integration test event with device identification.
-
-        Args:
-            event_name: The name of the event to track
-            event_properties: Optional additional properties to include in the event
-        """
-        # Skip tracking if IS_TENSORLEAP_PLATFORM environment variable is set to 'true'
-        if os.environ.get('IS_TENSORLEAP_PLATFORM') == 'true':
-            return
-
-        try:
-            distinct_id = self._get_distinct_id()
-
-            tensorleap_user_id = self._get_tensorleap_user_id()
-            whoami = self._get_whoami()
-            device_id = self._get_or_create_device_id()
-
-            properties = {
-                'tracking_version': TRACKING_VERSION,
-                'service': 'code-loader',
-                'whoami': whoami,
-                '$device_id': device_id,  # Always use device_id for $device_id
-                'python_version': f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}",
-                'platform': os.name,
-            }
-
-            if tensorleap_user_id:
-                properties['user_id'] = tensorleap_user_id
-
-            if event_properties:
-                properties.update(event_properties)
-
-            self.mp.track(distinct_id, event_name, properties)
-        except Exception as e:
-            pass
-
 
 # Global tracker instance
 _tracker = None
@@ -177,7 +140,3 @@ def get_tracker() -> MixpanelTracker:
 
 def track_code_loader_loaded(event_properties: Optional[Dict[str, Any]] = None) -> None:
     get_tracker().track_code_loader_loaded(event_properties)
-
-
-def track_integration_test_event(event_name: str, event_properties: Optional[Dict[str, Any]] = None) -> None:
-    get_tracker().track_integration_test_event(event_name, event_properties)
--- code_loader-1.0.139.dev4/pyproject.toml
+++ code_loader-1.0.139.dev6/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "code-loader"
-version = "1.0.139.dev4"
+version = "1.0.139.dev6"
 description = ""
 authors = ["dorhar <doron.harnoy@tensorleap.ai>"]
 license = "MIT"