code-loader 1.0.139.dev3__tar.gz → 1.0.139.dev4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of code-loader might be problematic.

Files changed (36)
  1. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/PKG-INFO +4 -3
  2. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/contract/datasetclasses.py +1 -9
  3. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/inner_leap_binder/leapbinder_decorators.py +117 -457
  4. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/mixpanel_tracker.py +41 -0
  5. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/pyproject.toml +1 -1
  6. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/LICENSE +0 -0
  7. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/README.md +0 -0
  8. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/__init__.py +0 -0
  9. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/contract/__init__.py +0 -0
  10. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/contract/enums.py +0 -0
  11. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/contract/exceptions.py +0 -0
  12. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/contract/mapping.py +0 -0
  13. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/contract/responsedataclasses.py +0 -0
  14. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/contract/visualizer_classes.py +0 -0
  15. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/default_losses.py +0 -0
  16. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/default_metrics.py +0 -0
  17. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/__init__.py +0 -0
  18. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/api.py +0 -0
  19. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/cli_config_utils.py +0 -0
  20. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/client.py +0 -0
  21. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/epoch.py +0 -0
  22. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/experiment.py +0 -0
  23. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/experiment_context.py +0 -0
  24. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/types.py +0 -0
  25. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/utils.py +0 -0
  26. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/experiment_api/workingspace_config_utils.py +0 -0
  27. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/inner_leap_binder/__init__.py +0 -0
  28. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/inner_leap_binder/leapbinder.py +0 -0
  29. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/leaploader.py +0 -0
  30. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/leaploaderbase.py +0 -0
  31. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/plot_functions/__init__.py +0 -0
  32. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/plot_functions/plot_functions.py +0 -0
  33. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/plot_functions/visualize.py +0 -0
  34. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/utils.py +0 -0
  35. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/visualizers/__init__.py +0 -0
  36. {code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/visualizers/default_visualizers.py +0 -0

{code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/PKG-INFO
@@ -1,9 +1,9 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: code-loader
- Version: 1.0.139.dev3
+ Version: 1.0.139.dev4
  Summary:
- Home-page: https://github.com/tensorleap/code-loader
  License: MIT
+ License-File: LICENSE
  Author: dorhar
  Author-email: doron.harnoy@tensorleap.ai
  Requires-Python: >=3.8,<3.13
@@ -20,6 +20,7 @@ Requires-Dist: numpy (>=2.3.2,<3.0.0) ; python_version >= "3.11" and python_vers
  Requires-Dist: psutil (>=5.9.5,<6.0.0)
  Requires-Dist: pyyaml (>=6.0.2,<7.0.0)
  Requires-Dist: requests (>=2.32.3,<3.0.0)
+ Project-URL: Homepage, https://github.com/tensorleap/code-loader
  Project-URL: Repository, https://github.com/tensorleap/code-loader
  Description-Content-Type: text/markdown
{code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/contract/datasetclasses.py
@@ -1,4 +1,3 @@
- import warnings
  from dataclasses import dataclass, field
  from typing import Any, Callable, List, Optional, Dict, Union, Type
  import re
@@ -57,14 +56,7 @@ class PreprocessResponse:
  for sample_id in self.sample_ids:
  assert isinstance(sample_id, str), f"Sample id should be of type str. Got: {type(sample_id)}"
  else:
- raise Exception("length is deprecated, please use sample_ids instead.")
-
- if self.state is None:
- warnings.warn(
- "PreprocessResponse.state is not set. For best practice, assign a unique `state` value to each PreprocessResponse instance."
- )
- else:
- assert isinstance(self.state, DataStateType), f"PreprocessResponse.state must be of type {DataStateType.__name__} but got {type(self.state)}"
+ raise Exception("length is deprecated.")

  def __hash__(self) -> int:
  return id(self)
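
Editor's note: dev4 removes the warning/assert on PreprocessResponse.state, so state is effectively optional again, and shortens the `length` deprecation message. A minimal construction sketch follows; the `data` keyword and the DataStateType import path and member name are assumptions, not taken from this diff:

from code_loader.contract.datasetclasses import PreprocessResponse
from code_loader.contract.enums import DataStateType  # assumed import path for DataStateType

train = PreprocessResponse(
    sample_ids=[str(i) for i in range(1000)],  # ids are validated as str above
    data={"images_dir": "/data/train"},        # assumed field name for the free-form user payload
    state=DataStateType.training,              # assumed enum member; no longer warned about if omitted
)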
{code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/inner_leap_binder/leapbinder_decorators.py
@@ -1,10 +1,9 @@
  # mypy: ignore-errors
  import os
- import warnings
  from collections import defaultdict
  from functools import lru_cache
  from pathlib import Path
- from typing import Optional, Union, Callable, List, Dict, get_args, get_origin
+ from typing import Optional, Union, Callable, List, Dict, Set, Any

  import numpy as np
  import numpy.typing as npt
@@ -26,116 +25,9 @@ import functools

  _called_from_inside_tl_decorator = 0
  _called_from_inside_tl_integration_test_decorator = False
- _update_env_status = None
-
-
-
-
- def validate_args_structure(*args, types_order, func_name, expected_names, **kwargs):
- def _type_to_str(t):
- origin = get_origin(t)
- if origin is Union:
- return " | ".join(tt.__name__ for tt in get_args(t))
- elif hasattr(t, "__name__"):
- return t.__name__
- else:
- return str(t)
-
- def _format_types(types, names=None):
- return ", ".join(
- f"{(names[i] + ': ') if names else f'arg{i}: '}{_type_to_str(ty)}"
- for i, ty in enumerate(types)
- )
-
- if expected_names:
- normalized_args = []
- for i, name in enumerate(expected_names):
- if i < len(args):
- normalized_args.append(args[i])
- elif name in kwargs:
- normalized_args.append(kwargs[name])
- else:
- raise AssertionError(
- f"{func_name} validation failed: "
- f"Missing required argument '{name}'. "
- f"Expected arguments: {expected_names}."
- )
- else:
- normalized_args = list(args)
- if len(normalized_args) != len(types_order):
- expected = _format_types(types_order, expected_names)
- got_types = ", ".join(type(arg).__name__ for arg in normalized_args)
- raise AssertionError(
- f"{func_name} validation failed: "
- f"Expected exactly {len(types_order)} arguments ({expected}), "
- f"but got {len(normalized_args)} argument(s) of type(s): ({got_types}). "
- f"Correct usage example: {func_name}({expected})"
- )
-
- for i, (arg, expected_type) in enumerate(zip(normalized_args, types_order)):
- origin = get_origin(expected_type)
- if origin is Union:
- allowed_types = get_args(expected_type)
- else:
- allowed_types = (expected_type,)
-
- if not isinstance(arg, allowed_types):
- allowed_str = " | ".join(t.__name__ for t in allowed_types)
- raise AssertionError(
- f"{func_name} validation failed: "
- f"Argument '{expected_names[i] if expected_names else f'arg{i}'}' "
- f"expected type {allowed_str}, but got {type(arg).__name__}. "
- f"Correct usage example: {func_name}({_format_types(types_order, expected_names)})"
- )
-
-
- def validate_output_structure(result, func_name: str, expected_type_name="np.ndarray",gt_flag=False):
- if result is None or (isinstance(result, float) and np.isnan(result)):
- if gt_flag:
- raise AssertionError(
- f"{func_name} validation failed: "
- f"The function returned {result!r}. "
- f"If you are working with an unlabeled dataset and no ground truth is available, "
- f"use 'return np.array([], dtype=np.float32)' instead. "
- f"Otherwise, {func_name} expected a single {expected_type_name} object. "
- f"Make sure the function ends with 'return <{expected_type_name}>'."
- )
-
- raise AssertionError(
- f"{func_name} validation failed: "
- f"The function returned None. "
- f"Expected a single {expected_type_name} object. "
- f"Make sure the function ends with 'return <{expected_type_name}>'."
- )
- if isinstance(result, tuple):
- element_descriptions = [
- f"[{i}] type: {type(r).__name__}"
- for i, r in enumerate(result)
- ]
- element_summary = "\n ".join(element_descriptions)
-
- raise AssertionError(
- f"{func_name} validation failed: "
- f"The function returned multiple outputs ({len(result)} values), "
- f"but only a single {expected_type_name} is allowed.\n\n"
- f"Returned elements:\n"
- f" {element_summary}\n\n"
- f"Correct usage example:\n"
- f" def {func_name}(...):\n"
- f" return <{expected_type_name}>\n\n"
- f"If you intended to return multiple values, combine them into a single "
- f"{expected_type_name} (e.g., by concatenation or stacking)."
- )
-
- def batch_warning(result, func_name):
- if result.shape[0] == 1:
- warnings.warn(
- f"{func_name} warning: Tensorleap will add a batch dimension at axis 0 to the output of {func_name}, "
- f"although the detected size of axis 0 is already 1. "
- f"This may lead to an extra batch dimension (e.g., shape (1, 1, ...)). "
- f"Please ensure that the output of '{func_name}' is not already batched "
- f"to avoid computation errors."
- )
+ _integration_events_emitted: Set[str] = set()
+
+
  def _add_mapping_connection(user_unique_name, connection_destinations, arg_names, name, node_mapping_type):
  connection_destinations = [connection_destination for connection_destination in connection_destinations
  if not isinstance(connection_destination, SamplePreprocessResponse)]
@@ -158,24 +50,12 @@ def tensorleap_integration_test():
  def decorating_function(integration_test_function: Callable):
  leap_binder.integration_test_func = integration_test_function

- def _validate_input_args(*args, **kwargs):
- sample_id,preprocess_response=args
- assert type(sample_id) == preprocess_response.sample_id_type, (
- f"tensorleap_integration_test validation failed: "
- f"sample_id type ({type(sample_id).__name__}) does not match the expected "
- f"type ({preprocess_response.sample_id_type}) from the PreprocessResponse."
- )
-
  def inner(*args, **kwargs):
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
- func_name='integration_test',expected_names=["idx", "preprocess"],**kwargs)
- _validate_input_args(*args, **kwargs)
-
  global _called_from_inside_tl_integration_test_decorator
+ global _integration_events_emitted
+ _integration_events_emitted.clear() # Clear events for new test
  try:
  _called_from_inside_tl_integration_test_decorator = True
- if not _update_env_status is None:
- _update_env_status("tensorleap_integration_test", "v")#put here because otherwise it will become v only if it finishes all the script
  ret = integration_test_function(*args, **kwargs)

  try:
@@ -188,7 +68,7 @@ def tensorleap_integration_test():
  line_number = first_tb.lineno
  if isinstance(e, TypeError) and 'is not subscriptable' in str(e):
  print(f'Invalid integration code. File {file_name}, line {line_number}: '
- f"indexing is supported only on the model's predictions inside the integration test. Please remove this indexing operation usage from the integration test code.")
+ f'Please remove this indexing operation usage from the integration test code.')
  else:
  print(f'Invalid integration code. File {file_name}, line {line_number}: '
  f'Integration test is only allowed to call Tensorleap decorators. '
@@ -200,8 +80,8 @@ def tensorleap_integration_test():
  _called_from_inside_tl_integration_test_decorator = False

  leap_binder.check()
- return inner

+ return inner

  return decorating_function
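
Editor's note: in dev4 the decorator no longer type-checks its arguments up front; it only clears the per-run event set, runs the wrapped function, and then calls leap_binder.check(). A structural sketch of how such a test is typically written is below; the import path is assumed and the encoder/model helpers are hypothetical stand-ins for functions decorated elsewhere in the same script:

from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_integration_test  # assumed path

@tensorleap_integration_test()
def integration_test(sample_id, preprocess_response):
    # load_model, input_encoder, gt_encoder and my_loss are hypothetical functions
    # decorated with the corresponding tensorleap_* decorators earlier in the script.
    model = load_model()
    x = input_encoder(sample_id, preprocess_response)    # batched to (1, ...) by the decorator
    y_true = gt_encoder(sample_id, preprocess_response)  # batched to (1, ...) by the decorator
    y_pred = model(x)
    my_loss(y_true, y_pred)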
@@ -211,52 +91,31 @@ def _safe_get_item(key):
  except ValueError:
  raise Exception(f'Tensorleap currently supports models with no more then 10 inputs')

+ def _emit_integration_event_once(event_name: str, props: Dict[str, Any]) -> None:
+ """Emit an integration test event only once per test run."""
+ if event_name in _integration_events_emitted:
+ return
+
+ try:
+ from code_loader.mixpanel_tracker import track_integration_test_event
+ track_integration_test_event(event_name, props)
+ _integration_events_emitted.add(event_name)
+ except Exception:
+ pass
+
  def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]] = []):
- assert isinstance(prediction_types, list),(
- f"tensorleap_load_model validation failed: "
- f" prediction_types is an optional argument of type List[PredictionTypeHandler]] but got {type(prediction_types).__name__}."
- )
  for i, prediction_type in enumerate(prediction_types):
- assert isinstance(prediction_type, PredictionTypeHandler),(f"tensorleap_load_model validation failed: "
- f" prediction_types at position {i} must be of type PredictionTypeHandler but got {type(prediction_types[i]).__name__}.")
  leap_binder.add_prediction(prediction_type.name, prediction_type.labels, prediction_type.channel_dim, i)

- def _validate_result(result) -> None:
- valid_types=["onnxruntime","keras"]
- err_message=f"tensorleap_load_model validation failed:\nSupported models are Keras and onnxruntime only and non of them was returned."
- validate_output_structure(result, func_name="tensorleap_load_model", expected_type_name= [" | ".join(t for t in valid_types)][0])
- try:
- import keras
- except ImportError:
- keras = None
- try:
- import onnxruntime
- except ImportError:
- onnxruntime = None
-
- if not keras and not onnxruntime:
- raise AssertionError(err_message)
-
- is_keras_model = bool(keras and isinstance(result, getattr(keras, "Model", tuple())))
- is_onnx_model = bool(onnxruntime and isinstance(result, onnxruntime.InferenceSession))
-
- if not any([is_keras_model, is_onnx_model]):
- raise AssertionError( err_message)
-
-
-
  def decorating_function(load_model_func):
  class TempMapping:
  pass

  @lru_cache()
- def inner(*args, **kwargs):
- validate_args_structure(*args, types_order=[],
- func_name='tensorleap_load_model',expected_names=[],**kwargs)
+ def inner():
  class ModelPlaceholder:
  def __init__(self):
- self.model = load_model_func() #TODO- check why this fails on onnx model
- _validate_result(self.model)
+ self.model = load_model_func()

  # keras interface
  def __call__(self, arg):
@@ -320,10 +179,8 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]

  def get_inputs(self):
  return self.model.get_inputs()
- model_placeholder=ModelPlaceholder()
- if not _update_env_status is None:
- _update_env_status("tensorleap_load_model", "v")
- return model_placeholder
+
+ return ModelPlaceholder()

  def mapping_inner():
  class ModelOutputPlaceholder:
@@ -386,11 +243,12 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]

  return ModelPlaceholder()

- def final_inner(*args, **kwargs):
+ def final_inner():
  if os.environ.get(mapping_runtime_mode_env_var_mame):
  return mapping_inner()
  else:
- return inner(*args, **kwargs)
+ return inner()
+
  return final_inner

  return decorating_function
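
Editor's note: with dev4, inner() takes no arguments and the returned object is no longer checked to be a keras.Model or an onnxruntime.InferenceSession. A minimal loader sketch follows; the import path is assumed and the model path is hypothetical:

import onnxruntime as ort
from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_load_model  # assumed path

@tensorleap_load_model()
def load_model():
    # Hypothetical path; dev4 no longer validates the type of the returned model object.
    return ort.InferenceSession("model/model.onnx")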
@@ -401,168 +259,81 @@ def tensorleap_custom_metric(name: str,
  compute_insights: Optional[Union[bool, Dict[str, bool]]] = None,
  connects_to=None):
  name_to_unique_name = defaultdict(set)
+
  def decorating_function(
  user_function: Union[CustomCallableInterfaceMultiArgs, CustomMultipleReturnCallableInterfaceMultiArgs,
  ConfusionMatrixCallableInterfaceMultiArgs]):
-
- def _validate_decorators_signature():
- err_message = f"{user_function.__name__} validation failed.\n"
- if not isinstance(name, str):
- raise TypeError(err_message + f"`name` must be a string, got type {type(name).__name__}.")
- valid_directions = {MetricDirection.Upward, MetricDirection.Downward}
- if isinstance(direction, MetricDirection):
- if direction not in valid_directions:
- raise ValueError(
- err_message +
- f"Invalid MetricDirection: {direction}. Must be one of {valid_directions}, "
- f"got type {type(direction).__name__}."
- )
- elif isinstance(direction, dict):
- if not all(isinstance(k, str) for k in direction.keys()):
- invalid_keys = {k: type(k).__name__ for k in direction.keys() if not isinstance(k, str)}
- raise TypeError(
- err_message +
- f"All keys in `direction` must be strings, got invalid key types: {invalid_keys}."
- )
- for k, v in direction.items():
- if v not in valid_directions:
- raise ValueError(
- err_message +
- f"Invalid direction for key '{k}': {v}. Must be one of {valid_directions}, "
- f"got type {type(v).__name__}."
- )
- else:
- raise TypeError(
- err_message +
- f"`direction` must be a MetricDirection or a Dict[str, MetricDirection], "
- f"got type {type(direction).__name__}."
- )
- if compute_insights is not None:
- if not isinstance(compute_insights, (bool, dict)):
- raise TypeError(
- err_message +
- f"`compute_insights` must be a bool or a Dict[str, bool], "
- f"got type {type(compute_insights).__name__}."
- )
- if isinstance(compute_insights, dict):
- if not all(isinstance(k, str) for k in compute_insights.keys()):
- invalid_keys = {k: type(k).__name__ for k in compute_insights.keys() if not isinstance(k, str)}
- raise TypeError(
- err_message +
- f"All keys in `compute_insights` must be strings, got invalid key types: {invalid_keys}."
- )
- for k, v in compute_insights.items():
- if not isinstance(v, bool):
- raise TypeError(
- err_message +
- f"Invalid type for compute_insights['{k}']: expected bool, got type {type(v).__name__}."
- )
- if connects_to is not None:
- valid_types = (str, list, tuple, set)
- if not isinstance(connects_to, valid_types):
- raise TypeError(
- err_message +
- f"`connects_to` must be one of {valid_types}, got type {type(connects_to).__name__}."
- )
- if isinstance(connects_to, (list, tuple, set)):
- invalid_elems = [f"{type(e).__name__}" for e in connects_to if not isinstance(e, str)]
- if invalid_elems:
- raise TypeError(
- err_message +
- f"All elements in `connects_to` must be strings, "
- f"but found element types: {invalid_elems}."
- )
-
-
- _validate_decorators_signature()
-
  for metric_handler in leap_binder.setup_container.metrics:
  if metric_handler.metric_handler_data.name == name:
  raise Exception(f'Metric with name {name} already exists. '
  f'Please choose another')

  def _validate_input_args(*args, **kwargs) -> None:
- assert len(args) > 0, (
- f"{user_function.__name__}() validation failed: "
- f"Expected at least one positional|key-word argument of type np.ndarray, "
- f"but received none. "
- f"Correct usage example: tensorleap_custom_metric(input_array: np.ndarray, ...)"
- )
  for i, arg in enumerate(args):
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
- f'{user_function.__name__}() validation failed: '
+ f'tensorleap_custom_metric validation failed: '
  f'Argument #{i} should be a numpy array. Got {type(arg)}.')
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
  assert arg.shape[0] == leap_binder.batch_size_to_validate, \
- (f'{user_function.__name__}() validation failed: Argument #{i} '
+ (f'tensorleap_custom_metric validation failed: Argument #{i} '
  f'first dim should be as the batch size. Got {arg.shape[0]} '
  f'instead of {leap_binder.batch_size_to_validate}')

  for _arg_name, arg in kwargs.items():
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
- f'{user_function.__name__}() validation failed: '
+ f'tensorleap_custom_metric validation failed: '
  f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
  assert arg.shape[0] == leap_binder.batch_size_to_validate, \
- (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
+ (f'tensorleap_custom_metric validation failed: Argument {_arg_name} '
  f'first dim should be as the batch size. Got {arg.shape[0]} '
  f'instead of {leap_binder.batch_size_to_validate}')

  def _validate_result(result) -> None:
- validate_output_structure(result, func_name=user_function.__name__,
- expected_type_name="List[float | int | None | List[ConfusionMatrixElement] ] | NDArray[np.float32] or dictonary with one of these types as its values types")
- supported_types_message = (f'{user_function.__name__}() validation failed: '
- f'{user_function.__name__}() has returned unsupported type.\nSupported types are List[float|int|None], '
- f'List[List[ConfusionMatrixElement]], NDArray[np.float32] or dictonary with one of these types as its values types. ')
+ supported_types_message = (f'tensorleap_custom_metric validation failed: '
+ f'Metric has returned unsupported type. Supported types are List[float], '
+ f'List[List[ConfusionMatrixElement]], NDArray[np.float32]. ')

- def _validate_single_metric(single_metric_result,key=None):
+ def _validate_single_metric(single_metric_result):
  if isinstance(single_metric_result, list):
  if isinstance(single_metric_result[0], list):
- assert all(isinstance(cm, ConfusionMatrixElement) for cm in single_metric_result[0]), (
- f"{supported_types_message} "
- f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
- f"List[List[{', '.join(type(cm).__name__ for cm in single_metric_result[0])}]]."
- )
-
+ assert isinstance(single_metric_result[0][0], ConfusionMatrixElement), \
+ f'{supported_types_message}Got List[List[{type(single_metric_result[0][0])}]].'
  else:
- assert all(isinstance(v, (float,int,type(None),np.float32)) for v in single_metric_result), (
- f"{supported_types_message}\n"
- f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
- f"List[{', '.join(type(v).__name__ for v in single_metric_result)}]."
- )
+ assert isinstance(single_metric_result[0], (
+ float, int,
+ type(None))), f'{supported_types_message}Got List[{type(single_metric_result[0])}].'
  else:
  assert isinstance(single_metric_result,
- np.ndarray), f'{supported_types_message}\nGot {type(single_metric_result)}.'
- assert len(single_metric_result.shape) == 1, (f'{user_function.__name__}() validation failed: '
+ np.ndarray), f'{supported_types_message}Got {type(single_metric_result)}.'
+ assert len(single_metric_result.shape) == 1, (f'tensorleap_custom_metric validation failed: '
  f'The return shape should be 1D. Got {len(single_metric_result.shape)}D.')

  if leap_binder.batch_size_to_validate:
  assert len(single_metric_result) == leap_binder.batch_size_to_validate, \
- f'{user_function.__name__}() validation failed: The return len {f"of srt{key} value" if key is not None else ""} should be as the batch size.'
+ f'tensorleap_custom_metrix validation failed: The return len should be as the batch size.'

  if isinstance(result, dict):
  for key, value in result.items():
- _validate_single_metric(value,key)
-
  assert isinstance(key, str), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'Keys in the return dict should be of type str. Got {type(key)}.')
  _validate_single_metric(value)

  if isinstance(direction, dict):
  for direction_key in direction:
  assert direction_key in result, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'Keys in the direction mapping should be part of result keys. Got key {direction_key}.')

  if compute_insights is not None:
  assert isinstance(compute_insights, dict), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'compute_insights should be dict if using the dict results. Got {type(compute_insights)}.')

  for ci_key in compute_insights:
  assert ci_key in result, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'Keys in the compute_insights mapping should be part of result keys. Got key {ci_key}.')

  else:
@@ -570,7 +341,7 @@ def tensorleap_custom_metric(name: str,

  if compute_insights is not None:
  assert isinstance(compute_insights, bool), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_metric validation failed: '
  f'compute_insights should be boolean. Got {type(compute_insights)}.')

  @functools.wraps(user_function)
@@ -602,8 +373,6 @@ def tensorleap_custom_metric(name: str,
  result = inner_without_validate(*args, **kwargs)

  _validate_result(result)
- if not _update_env_status is None:
- _update_env_status("tensorleap_custom_metric","v")
  return result

  def mapping_inner(*args, **kwargs):
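
Editor's note: the checks that remain above require each metric output to be per-sample, that is a 1-D array (or list, list of ConfusionMatrixElement lists, or a dict of those) whose length equals the batch size. A minimal conforming sketch, assuming the decorator is importable from this module:

import numpy as np
from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_metric  # assumed path

@tensorleap_custom_metric(name='mean_abs_error')
def mean_abs_error(y_pred: np.ndarray, y_true: np.ndarray) -> np.ndarray:
    # Reduce every axis except the batch axis and return a 1-D float32 vector
    # whose length equals the batch size, as the dev4 validation expects.
    reduce_axes = tuple(range(1, y_pred.ndim))
    return np.abs(y_pred - y_true).mean(axis=reduce_axes).astype(np.float32)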
@@ -643,38 +412,28 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
  name_to_unique_name = defaultdict(set)

  def decorating_function(user_function: VisualizerCallableInterface):
- assert isinstance(visualizer_type,LeapDataType),(f"{user_function.__name__} validation failed: "
- f"visualizer_type should be of type {LeapDataType.__name__} but got {type(visualizer_type)}"
- )
-
  for viz_handler in leap_binder.setup_container.visualizers:
  if viz_handler.visualizer_handler_data.name == name:
  raise Exception(f'Visualizer with name {name} already exists. '
  f'Please choose another')

  def _validate_input_args(*args, **kwargs):
- assert len(args) > 0, (
- f"{user_function.__name__}() validation failed: "
- f"Expected at least one positional|key-word argument of type np.ndarray, "
- f"but received none. "
- f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
- )
  for i, arg in enumerate(args):
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
- f'{user_function.__name__}() validation failed: '
+ f'tensorleap_custom_visualizer validation failed: '
  f'Argument #{i} should be a numpy array. Got {type(arg)}.')
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
  assert arg.shape[0] != leap_binder.batch_size_to_validate, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_visualizer validation failed: '
  f'Argument #{i} should be without batch dimension. ')

  for _arg_name, arg in kwargs.items():
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
- f'{user_function.__name__}() validation failed: '
+ f'tensorleap_custom_visualizer validation failed: '
  f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
  assert arg.shape[0] != leap_binder.batch_size_to_validate, \
- (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
+ (f'tensorleap_custom_visualizer validation failed: Argument {_arg_name} '
  f'should be without batch dimension. ')

  def _validate_result(result):
@@ -688,11 +447,8 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
  LeapDataType.ImageWithBBox: LeapImageWithBBox,
  LeapDataType.ImageWithHeatmap: LeapImageWithHeatmap
  }
- validate_output_structure(result, func_name=user_function.__name__,
- expected_type_name=result_type_map[visualizer_type])
-
  assert isinstance(result, result_type_map[visualizer_type]), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_custom_visualizer validation failed: '
  f'The return type should be {result_type_map[visualizer_type]}. Got {type(result)}.')

  @functools.wraps(user_function)
@@ -724,8 +480,6 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
  result = inner_without_validate(*args, **kwargs)

  _validate_result(result)
- if not _update_env_status is None:
- _update_env_status("tensorleap_custom_visualizer","v")
  return result

  def mapping_inner(*args, **kwargs):
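
Editor's note: visualizer arguments arrive without a batch dimension and the return value must be the Leap class matching visualizer_type. A hedged sketch for an image visualizer; the LeapDataType import path and the LeapImage constructor argument are assumptions (check code_loader/contract/visualizer_classes.py for the exact signature):

import numpy as np
from code_loader.contract.enums import LeapDataType  # assumed path
from code_loader.contract.visualizer_classes import LeapImage
from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_visualizer  # assumed path

@tensorleap_custom_visualizer(name='input_image', visualizer_type=LeapDataType.Image)
def input_image_visualizer(image: np.ndarray) -> LeapImage:
    # `image` is a single un-batched sample; LeapImage is assumed to wrap the array directly.
    return LeapImage(image.astype(np.float32))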
@@ -767,26 +521,30 @@ def tensorleap_metadata(
  f'Please choose another')

  def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
+ assert isinstance(sample_id, (int, str)), \
+ (f'tensorleap_metadata validation failed: '
+ f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
+ assert isinstance(preprocess_response, PreprocessResponse), \
+ (f'tensorleap_metadata validation failed: '
+ f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
  assert type(sample_id) == preprocess_response.sample_id_type, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_metadata validation failed: '
  f'Argument sample_id should be as the same type as defined in the preprocess response '
  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')

  def _validate_result(result):
  supported_result_types = (type(None), int, str, bool, float, dict, np.floating,
  np.bool_, np.unsignedinteger, np.signedinteger, np.integer)
- validate_output_structure(result, func_name=user_function.__name__,
- expected_type_name=supported_result_types)
  assert isinstance(result, supported_result_types), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_metadata validation failed: '
  f'Unsupported return type. Got {type(result)}. should be any of {str(supported_result_types)}')
  if isinstance(result, dict):
  for key, value in result.items():
  assert isinstance(key, str), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_metadata validation failed: '
  f'Keys in the return dict should be of type str. Got {type(key)}.')
  assert isinstance(value, supported_result_types), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_metadata validation failed: '
  f'Values in the return dict should be of type {str(supported_result_types)}. Got {type(value)}.')

  def inner_without_validate(sample_id, preprocess_response):
@@ -803,19 +561,15 @@ def tensorleap_metadata(

  leap_binder.set_metadata(inner_without_validate, name, metadata_type)

- def inner(*args,**kwargs):
+ def inner(sample_id, preprocess_response):
  if os.environ.get(mapping_runtime_mode_env_var_mame):
  return None
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
- func_name=user_function.__name__, expected_names=["idx", "preprocess"],**kwargs)
- sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
+
  _validate_input_args(sample_id, preprocess_response)

  result = inner_without_validate(sample_id, preprocess_response)

  _validate_result(result)
- if not _update_env_status is None:
- _update_env_status("tensorleap_metadata","v")
  return result

  return inner
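
Editor's note: metadata callbacks are called with (sample_id, preprocess_response) positionally in dev4 and may return a scalar or a flat dict of scalars, per the checks above. A minimal sketch, assuming the import path and that name is the only required decorator argument:

from code_loader.contract.datasetclasses import PreprocessResponse
from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_metadata  # assumed path

@tensorleap_metadata(name='sample_stats')
def sample_metadata(sample_id: str, preprocess_response: PreprocessResponse) -> dict:
    # Keys must be str and values must be scalars (int / float / bool / str / None).
    return {
        'sample_id_length': len(sample_id),
        'split_size': len(preprocess_response.sample_ids),
    }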
@@ -878,23 +632,19 @@ def tensorleap_preprocess():

  def _validate_input_args(*args, **kwargs):
  assert len(args) == 0 and len(kwargs) == 0, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_preprocess validation failed: '
  f'The function should not take any arguments. Got {args} and {kwargs}.')

  def _validate_result(result):
- assert isinstance(result, list), (
- f"{user_function.__name__}() validation failed: expected return type list[{PreprocessResponse.__name__}]"
- f"(e.g., [PreprocessResponse1, PreprocessResponse2, ...]), but returned type is {type(result).__name__}."
- if not isinstance(result, tuple)
- else f"{user_function.__name__}() validation failed: expected to return a single list[{PreprocessResponse.__name__}] object, "
- f"but returned {len(result)} objects instead."
- )
+ assert isinstance(result, list), \
+ (f'tensorleap_preprocess validation failed: '
+ f'The return type should be a list. Got {type(result)}.')
  for i, response in enumerate(result):
  assert isinstance(response, PreprocessResponse), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_preprocess validation failed: '
  f'Element #{i} in the return list should be a PreprocessResponse. Got {type(response)}.')
  assert len(set(result)) == len(result), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_preprocess validation failed: '
  f'The return list should not contain duplicate PreprocessResponse objects.')

  def inner(*args, **kwargs):
@@ -902,10 +652,9 @@ def tensorleap_preprocess():
  return [None, None, None, None]

  _validate_input_args(*args, **kwargs)
+
  result = user_function()
  _validate_result(result)
- if not _update_env_status is None:
- _update_env_status("tensorleap_preprocess", "v")
  return result

  return inner
@@ -1104,23 +853,29 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
  raise Exception(f"Channel dim for input {name} is expected to be either -1 or positive")

  def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
+ assert isinstance(sample_id, (int, str)), \
+ (f'tensorleap_input_encoder validation failed: '
+ f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
+ assert isinstance(preprocess_response, PreprocessResponse), \
+ (f'tensorleap_input_encoder validation failed: '
+ f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
  assert type(sample_id) == preprocess_response.sample_id_type, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_input_encoder validation failed: '
  f'Argument sample_id should be as the same type as defined in the preprocess response '
  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')

  def _validate_result(result):
- validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray")
  assert isinstance(result, np.ndarray), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_input_encoder validation failed: '
  f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
  assert result.dtype == np.float32, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_input_encoder validation failed: '
  f'The return type should be a numpy array of type float32. Got {result.dtype}.')
- assert channel_dim - 1 <= len(result.shape), (f'{user_function.__name__}() validation failed: '
+ assert channel_dim - 1 <= len(result.shape), (f'tensorleap_input_encoder validation failed: '
  f'The channel_dim ({channel_dim}) should be <= to the rank of the resulting input rank ({len(result.shape)}).')

  def inner_without_validate(sample_id, preprocess_response):
+
  global _called_from_inside_tl_decorator
  _called_from_inside_tl_decorator += 1

@@ -1134,10 +889,7 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
  leap_binder.set_input(inner_without_validate, name, channel_dim=channel_dim)


- def inner(*args, **kwargs):
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
- func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
- sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
+ def inner(sample_id, preprocess_response):
  _validate_input_args(sample_id, preprocess_response)

  result = inner_without_validate(sample_id, preprocess_response)
@@ -1145,20 +897,24 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
  _validate_result(result)

  if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
- batch_warning(result,user_function.__name__)
  result = np.expand_dims(result, axis=0)
- if not _update_env_status is None:
- _update_env_status("tensorleap_input_encoder", "v")
+ # Emit integration test event once per test
+ _emit_integration_event_once('input_encoder_integration_test', {
+ 'encoder_name': name,
+ 'channel_dim': channel_dim,
+ 'model_input_index': model_input_index
+ })

  return result


+
  node_mapping_type = NodeMappingType.Input
  if model_input_index is not None:
  node_mapping_type = NodeMappingType(f'Input{str(model_input_index)}')
  inner.node_mapping = NodeMapping(name, node_mapping_type)

- def mapping_inner(*args, **kwargs):
+ def mapping_inner(sample_id, preprocess_response):
  class TempMapping:
  pass

@@ -1170,11 +926,11 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):

  mapping_inner.node_mapping = NodeMapping(name, node_mapping_type)

- def final_inner(*args, **kwargs):
+ def final_inner(sample_id, preprocess_response):
  if os.environ.get(mapping_runtime_mode_env_var_mame):
- return mapping_inner(*args, **kwargs)
+ return mapping_inner(sample_id, preprocess_response)
  else:
- return inner(*args, **kwargs)
+ return inner(sample_id, preprocess_response)

  final_inner.node_mapping = NodeMapping(name, node_mapping_type)
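
Editor's note: the encoder wrappers are back to a fixed (sample_id, preprocess_response) signature; when called from inside the integration test, the decorator adds the batch axis itself and emits the one-time 'input_encoder_integration_test' event shown above. A minimal conforming sketch (assumed import path, dummy data):

import numpy as np
from code_loader.contract.datasetclasses import PreprocessResponse
from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_input_encoder  # assumed path

@tensorleap_input_encoder(name='image', channel_dim=-1)
def input_encoder(sample_id: str, preprocess_response: PreprocessResponse) -> np.ndarray:
    # Return a single un-batched float32 sample; a real encoder would look the
    # sample up by sample_id in the preprocess response.
    return np.zeros((28, 28, 1), dtype=np.float32)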
@@ -1191,18 +947,23 @@ def tensorleap_gt_encoder(name: str):
  f'Please choose another')

  def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
+ assert isinstance(sample_id, (int, str)), \
+ (f'tensorleap_gt_encoder validation failed: '
+ f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
+ assert isinstance(preprocess_response, PreprocessResponse), \
+ (f'tensorleap_gt_encoder validation failed: '
+ f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
  assert type(sample_id) == preprocess_response.sample_id_type, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_gt_encoder validation failed: '
  f'Argument sample_id should be as the same type as defined in the preprocess response '
  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')

  def _validate_result(result):
- validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray",gt_flag=True)
  assert isinstance(result, np.ndarray), \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_gt_encoder validation failed: '
  f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
  assert result.dtype == np.float32, \
- (f'{user_function.__name__}() validation failed: '
+ (f'tensorleap_gt_encoder validation failed: '
  f'The return type should be a numpy array of type float32. Got {result.dtype}.')

  def inner_without_validate(sample_id, preprocess_response):
@@ -1219,10 +980,7 @@ def tensorleap_gt_encoder(name: str):
  leap_binder.set_ground_truth(inner_without_validate, name)


- def inner(*args, **kwargs):
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
- func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
- sample_id, preprocess_response = args
+ def inner(sample_id, preprocess_response):
  _validate_input_args(sample_id, preprocess_response)

  result = inner_without_validate(sample_id, preprocess_response)
@@ -1230,15 +988,13 @@ def tensorleap_gt_encoder(name: str):
  _validate_result(result)

  if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
- batch_warning(result, user_function.__name__)
  result = np.expand_dims(result, axis=0)
- if not _update_env_status is None:
- _update_env_status("tensorleap_gt_encoder", "v")
+
  return result

  inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)

- def mapping_inner(*args, **kwargs):
+ def mapping_inner(sample_id, preprocess_response):
  class TempMapping:
  pass

@@ -1249,11 +1005,11 @@ def tensorleap_gt_encoder(name: str):

  mapping_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)

- def final_inner(*args, **kwargs):
+ def final_inner(sample_id, preprocess_response):
  if os.environ.get(mapping_runtime_mode_env_var_mame):
- return mapping_inner(*args, **kwargs)
+ return mapping_inner(sample_id, preprocess_response)
  else:
- return inner(*args, **kwargs)
+ return inner(sample_id, preprocess_response)

  final_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)
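
Editor's note: the ground-truth encoder follows the same positional contract and must also return a float32 array; the special "unlabeled dataset" hint from the removed validate_output_structure(gt_flag=True) path is gone in dev4. A minimal sketch (assumed import path, dummy label):

import numpy as np
from code_loader.contract.datasetclasses import PreprocessResponse
from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_gt_encoder  # assumed path

@tensorleap_gt_encoder(name='label')
def gt_encoder(sample_id: str, preprocess_response: PreprocessResponse) -> np.ndarray:
    # One-hot ground truth for a single sample, float32 and un-batched.
    one_hot = np.zeros((10,), dtype=np.float32)
    one_hot[0] = 1.0
    return one_hot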
@@ -1274,37 +1030,28 @@ def tensorleap_custom_loss(name: str, connects_to=None):
  valid_types = (np.ndarray, SamplePreprocessResponse)

  def _validate_input_args(*args, **kwargs):
- assert len(args) > 0 and len(kwargs)==0, (
- f"{user_function.__name__}() validation failed: "
- f"Expected at least one positional|key-word argument of the allowed types (np.ndarray|SamplePreprocessResponse|list(np.ndarray|SamplePreprocessResponse)). "
- f"but received none. "
- f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
- )
  for i, arg in enumerate(args):
  if isinstance(arg, list):
  for y, elem in enumerate(arg):
- assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
+ assert isinstance(elem, valid_types), (f'tensorleap_custom_loss validation failed: '
  f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
  else:
- assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
+ assert isinstance(arg, valid_types), (f'tensorleap_custom_loss validation failed: '
  f'Argument #{i} should be a numpy array. Got {type(arg)}.')
  for _arg_name, arg in kwargs.items():
  if isinstance(arg, list):
  for y, elem in enumerate(arg):
- assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
+ assert isinstance(elem, valid_types), (f'tensorleap_custom_loss validation failed: '
  f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
  else:
- assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
+ assert isinstance(arg, valid_types), (f'tensorleap_custom_loss validation failed: '
  f'Argument #{_arg_name} should be a numpy array. Got {type(arg)}.')

  def _validate_result(result):
- validate_output_structure(result, func_name=user_function.__name__,
- expected_type_name="np.ndarray")
  assert isinstance(result, np.ndarray), \
- (f'{user_function.__name__} validation failed: '
+ (f'tensorleap_custom_loss validation failed: '
  f'The return type should be a numpy array. Got {type(result)}.')
- assert result.ndim<2 ,(f'{user_function.__name__} validation failed: '
- f'The return type should be a 1Dim numpy array but got {result.ndim}Dim.')
+

  @functools.wraps(user_function)
  def inner_without_validate(*args, **kwargs):
@@ -1335,9 +1082,6 @@ def tensorleap_custom_loss(name: str, connects_to=None):
  result = inner_without_validate(*args, **kwargs)

  _validate_result(result)
- if not _update_env_status is None:
- _update_env_status("tensorleap_custom_loss", "v")
-
  return result

  def mapping_inner(*args, **kwargs):
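
Editor's note: dev4 drops the result.ndim < 2 check, so the only requirement left above is that the loss returns a numpy array; a 1-D vector of per-sample losses remains the conservative shape. A minimal sketch (assumed import path):

import numpy as np
from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_loss  # assumed path

@tensorleap_custom_loss(name='mse')
def mse_loss(y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
    # Mean squared error per sample: reduce every axis except the batch axis.
    reduce_axes = tuple(range(1, y_true.ndim))
    return ((y_true - y_pred) ** 2).mean(axis=reduce_axes).astype(np.float32)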
@@ -1394,87 +1138,3 @@ def tensorleap_custom_layer(name: str):
  return custom_layer

  return decorating_function
-
-
- def tensorleap_status_table():
- '''
- Usage example:
- ###################
- leap_integration.py
- ###################
- from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_status_table
- ...
- ...
- ...
- if __name__ == '__main__':
- tensorleap_status_table()
- ...
- '''
- import atexit
- import sys
- import traceback
-
-
- table = [
- {"name": "tensorleap_preprocess", "status": "x"},
- {"name": "tensorleap_integration_test", "status": "x"},
- {"name": "tensorleap_input_encoder", "status": "x"},
- {"name": "tensorleap_gt_encoder", "status": "x"},
- {"name": "tensorleap_load_model", "status": "x"},
- {"name": "tensorleap_custom_loss", "status": "x"},
- {"name": "tensorleap_custom_metric (optional)", "status": "x"},
- {"name": "tensorleap_metadata (optional)", "status": "x"},
- {"name": "tensorleap_custom_visualizer (optional)", "status": "x"},
-
- ]
-
- _finalizer_called = {"done": False}
-
- def _print_table():
- ready_mess = "\nAll parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system."
- not_ready_mess = "\nSome mandatory components have not been set yet. Recommended next implementation step: "
- mandatory_ready_mess = "\nAll mandatory parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system or continue to the next optional implementation step: "
-
- name_width = max(len(row["name"]) for row in table)
- status_width = max(len(row["status"]) for row in table)
- header = f"{'Function Name'.ljust(name_width)} | {'Status'.ljust(status_width)}"
- sep = "-" * len(header)
- print("\n" + header)
- print(sep)
- ready=True
- for row in table:
- print(f"{row['name'].ljust(name_width)} | {row['status'].ljust(status_width)}")
- if row['status']=='x' and ready:
- ready=False
- next_step=row['name']
-
-
- print(ready_mess) if ready else print(mandatory_ready_mess+next_step) if "optional" in next_step else print(not_ready_mess+next_step)
- def update_env_params(name: str, status: str = "✓"):
- for row in table:
- if row["name"].removesuffix(" (optional)") == name:
- row["status"] = status
- break
- def run_on_exit():
- if _finalizer_called["done"]:
- return
- _finalizer_called["done"] = True
- _print_table()
- def handle_exception(exc_type, exc_value, exc_traceback):
- traceback.print_exception(exc_type, exc_value, exc_traceback)
- run_on_exit()
- atexit.register(run_on_exit)
- sys.excepthook = handle_exception
- global _update_env_status
- _update_env_status = update_env_params
- return update_env_params
-
-
-
-
-
-
-
-
-
-
{code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/code_loader/mixpanel_tracker.py
@@ -126,6 +126,43 @@ class MixpanelTracker:
  except Exception as e:
  pass

+ def track_integration_test_event(self, event_name: str, event_properties: Optional[Dict[str, Any]] = None) -> None:
+ """Track an integration test event with device identification.
+
+ Args:
+ event_name: The name of the event to track
+ event_properties: Optional additional properties to include in the event
+ """
+ # Skip tracking if IS_TENSORLEAP_PLATFORM environment variable is set to 'true'
+ if os.environ.get('IS_TENSORLEAP_PLATFORM') == 'true':
+ return
+
+ try:
+ distinct_id = self._get_distinct_id()
+
+ tensorleap_user_id = self._get_tensorleap_user_id()
+ whoami = self._get_whoami()
+ device_id = self._get_or_create_device_id()
+
+ properties = {
+ 'tracking_version': TRACKING_VERSION,
+ 'service': 'code-loader',
+ 'whoami': whoami,
+ '$device_id': device_id, # Always use device_id for $device_id
+ 'python_version': f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}",
+ 'platform': os.name,
+ }
+
+ if tensorleap_user_id:
+ properties['user_id'] = tensorleap_user_id
+
+ if event_properties:
+ properties.update(event_properties)
+
+ self.mp.track(distinct_id, event_name, properties)
+ except Exception as e:
+ pass
+

  # Global tracker instance
  _tracker = None
@@ -140,3 +177,7 @@ def get_tracker() -> MixpanelTracker:

  def track_code_loader_loaded(event_properties: Optional[Dict[str, Any]] = None) -> None:
  get_tracker().track_code_loader_loaded(event_properties)
+
+
+ def track_integration_test_event(event_name: str, event_properties: Optional[Dict[str, Any]] = None) -> None:
+ get_tracker().track_integration_test_event(event_name, event_properties)
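
Editor's note: the new module-level helper mirrors track_code_loader_loaded and is what the decorators import lazily; tracking is skipped when IS_TENSORLEAP_PLATFORM is 'true' and any failure is swallowed. A small call sketch using the event name and properties emitted by the input-encoder decorator above (the values shown are illustrative):

from code_loader.mixpanel_tracker import track_integration_test_event

track_integration_test_event('input_encoder_integration_test', {
    'encoder_name': 'image',        # illustrative values
    'channel_dim': -1,
    'model_input_index': None,
})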
{code_loader-1.0.139.dev3 → code_loader-1.0.139.dev4}/pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "code-loader"
- version = "1.0.139.dev3"
+ version = "1.0.139.dev4"
  description = ""
  authors = ["dorhar <doron.harnoy@tensorleap.ai>"]
  license = "MIT"