code-loader 1.0.139.dev9__py3-none-any.whl → 1.0.141__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of code-loader might be problematic according to the registry's advisory.

@@ -1,4 +1,3 @@
- import warnings
  from dataclasses import dataclass, field
  from typing import Any, Callable, List, Optional, Dict, Union, Type
  import re
@@ -57,14 +56,7 @@ class PreprocessResponse:
  for sample_id in self.sample_ids:
  assert isinstance(sample_id, str), f"Sample id should be of type str. Got: {type(sample_id)}"
  else:
- raise Exception("length is deprecated, please use sample_ids instead.")
-
- if self.state is None:
- warnings.warn(
- "PreprocessResponse.state is not set. For best practice, assign a unique `state` value to each PreprocessResponse instance."
- )
- else:
- assert isinstance(self.state, DataStateType), f"PreprocessResponse.state must be of type {DataStateType.__name__} but got {type(self.state)}"
+ raise Exception("length is deprecated.")

  def __hash__(self) -> int:
  return id(self)
@@ -1,14 +1,16 @@
  # mypy: ignore-errors
  import os
- import warnings
+ import logging
  from collections import defaultdict
  from functools import lru_cache
  from pathlib import Path
- from typing import Optional, Union, Callable, List, Dict, get_args, get_origin
+ from typing import Optional, Union, Callable, List, Dict, Set, Any

  import numpy as np
  import numpy.typing as npt

+ logger = logging.getLogger(__name__)
+
  from code_loader.contract.datasetclasses import CustomCallableInterfaceMultiArgs, \
  CustomMultipleReturnCallableInterfaceMultiArgs, ConfusionMatrixCallableInterfaceMultiArgs, CustomCallableInterface, \
  VisualizerCallableInterface, MetadataSectionCallableInterface, PreprocessResponse, SectionCallableInterface, \
@@ -20,122 +22,15 @@ from code_loader.contract.mapping import NodeMapping, NodeMappingType, NodeConne
20
22
  from code_loader.contract.visualizer_classes import LeapImage, LeapImageMask, LeapTextMask, LeapText, LeapGraph, \
21
23
  LeapHorizontalBar, LeapImageWithBBox, LeapImageWithHeatmap
22
24
  from code_loader.inner_leap_binder.leapbinder import mapping_runtime_mode_env_var_mame
25
+ from code_loader.mixpanel_tracker import clear_integration_events, AnalyticsEvent, emit_integration_event_once
23
26
 
24
27
  import inspect
25
28
  import functools
26
29
 
27
30
  _called_from_inside_tl_decorator = 0
28
31
  _called_from_inside_tl_integration_test_decorator = False
29
- _update_env_status = None
30
-
31
-
32
-
33
-
34
- def validate_args_structure(*args, types_order, func_name, expected_names, **kwargs):
35
- def _type_to_str(t):
36
- origin = get_origin(t)
37
- if origin is Union:
38
- return " | ".join(tt.__name__ for tt in get_args(t))
39
- elif hasattr(t, "__name__"):
40
- return t.__name__
41
- else:
42
- return str(t)
43
-
44
- def _format_types(types, names=None):
45
- return ", ".join(
46
- f"{(names[i] + ': ') if names else f'arg{i}: '}{_type_to_str(ty)}"
47
- for i, ty in enumerate(types)
48
- )
49
-
50
- if expected_names:
51
- normalized_args = []
52
- for i, name in enumerate(expected_names):
53
- if i < len(args):
54
- normalized_args.append(args[i])
55
- elif name in kwargs:
56
- normalized_args.append(kwargs[name])
57
- else:
58
- raise AssertionError(
59
- f"{func_name} validation failed: "
60
- f"Missing required argument '{name}'. "
61
- f"Expected arguments: {expected_names}."
62
- )
63
- else:
64
- normalized_args = list(args)
65
- if len(normalized_args) != len(types_order):
66
- expected = _format_types(types_order, expected_names)
67
- got_types = ", ".join(type(arg).__name__ for arg in normalized_args)
68
- raise AssertionError(
69
- f"{func_name} validation failed: "
70
- f"Expected exactly {len(types_order)} arguments ({expected}), "
71
- f"but got {len(normalized_args)} argument(s) of type(s): ({got_types}). "
72
- f"Correct usage example: {func_name}({expected})"
73
- )
74
-
75
- for i, (arg, expected_type) in enumerate(zip(normalized_args, types_order)):
76
- origin = get_origin(expected_type)
77
- if origin is Union:
78
- allowed_types = get_args(expected_type)
79
- else:
80
- allowed_types = (expected_type,)
81
-
82
- if not isinstance(arg, allowed_types):
83
- allowed_str = " | ".join(t.__name__ for t in allowed_types)
84
- raise AssertionError(
85
- f"{func_name} validation failed: "
86
- f"Argument '{expected_names[i] if expected_names else f'arg{i}'}' "
87
- f"expected type {allowed_str}, but got {type(arg).__name__}. "
88
- f"Correct usage example: {func_name}({_format_types(types_order, expected_names)})"
89
- )
90
-
91
-
92
- def validate_output_structure(result, func_name: str, expected_type_name="np.ndarray",gt_flag=False):
93
- if result is None or (isinstance(result, float) and np.isnan(result)):
94
- if gt_flag:
95
- raise AssertionError(
96
- f"{func_name} validation failed: "
97
- f"The function returned {result!r}. "
98
- f"If you are working with an unlabeled dataset and no ground truth is available, "
99
- f"use 'return np.array([], dtype=np.float32)' instead. "
100
- f"Otherwise, {func_name} expected a single {expected_type_name} object. "
101
- f"Make sure the function ends with 'return <{expected_type_name}>'."
102
- )
103
-
104
- raise AssertionError(
105
- f"{func_name} validation failed: "
106
- f"The function returned None. "
107
- f"Expected a single {expected_type_name} object. "
108
- f"Make sure the function ends with 'return <{expected_type_name}>'."
109
- )
110
- if isinstance(result, tuple):
111
- element_descriptions = [
112
- f"[{i}] type: {type(r).__name__}"
113
- for i, r in enumerate(result)
114
- ]
115
- element_summary = "\n ".join(element_descriptions)
116
-
117
- raise AssertionError(
118
- f"{func_name} validation failed: "
119
- f"The function returned multiple outputs ({len(result)} values), "
120
- f"but only a single {expected_type_name} is allowed.\n\n"
121
- f"Returned elements:\n"
122
- f" {element_summary}\n\n"
123
- f"Correct usage example:\n"
124
- f" def {func_name}(...):\n"
125
- f" return <{expected_type_name}>\n\n"
126
- f"If you intended to return multiple values, combine them into a single "
127
- f"{expected_type_name} (e.g., by concatenation or stacking)."
128
- )
129
-
130
- def batch_warning(result, func_name):
131
- if result.shape[0] == 1:
132
- warnings.warn(
133
- f"{func_name} warning: Tensorleap will add a batch dimension at axis 0 to the output of {func_name}, "
134
- f"although the detected size of axis 0 is already 1. "
135
- f"This may lead to an extra batch dimension (e.g., shape (1, 1, ...)). "
136
- f"Please ensure that the output of '{func_name}' is not already batched "
137
- f"to avoid computation errors."
138
- )
32
+
33
+
139
34
  def _add_mapping_connection(user_unique_name, connection_destinations, arg_names, name, node_mapping_type):
140
35
  connection_destinations = [connection_destination for connection_destination in connection_destinations
141
36
  if not isinstance(connection_destination, SamplePreprocessResponse)]
@@ -158,24 +53,15 @@ def tensorleap_integration_test():
158
53
  def decorating_function(integration_test_function: Callable):
159
54
  leap_binder.integration_test_func = integration_test_function
160
55
 
161
- def _validate_input_args(*args, **kwargs):
162
- sample_id,preprocess_response=args
163
- assert type(sample_id) == preprocess_response.sample_id_type, (
164
- f"tensorleap_integration_test validation failed: "
165
- f"sample_id type ({type(sample_id).__name__}) does not match the expected "
166
- f"type ({preprocess_response.sample_id_type}) from the PreprocessResponse."
167
- )
168
-
169
56
  def inner(*args, **kwargs):
170
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
171
- func_name='integration_test',expected_names=["idx", "preprocess"],**kwargs)
172
- _validate_input_args(*args, **kwargs)
173
-
174
57
  global _called_from_inside_tl_integration_test_decorator
58
+ # Clear integration test events for new test
59
+ try:
60
+ clear_integration_events()
61
+ except Exception as e:
62
+ logger.debug(f"Failed to clear integration events: {e}")
175
63
  try:
176
64
  _called_from_inside_tl_integration_test_decorator = True
177
- if not _update_env_status is None:
178
- _update_env_status("tensorleap_integration_test", "v")#put here because otherwise it will become v only if it finishes all the script
179
65
  ret = integration_test_function(*args, **kwargs)
180
66
 
181
67
  try:
@@ -188,7 +74,7 @@ def tensorleap_integration_test():
188
74
  line_number = first_tb.lineno
189
75
  if isinstance(e, TypeError) and 'is not subscriptable' in str(e):
190
76
  print(f'Invalid integration code. File {file_name}, line {line_number}: '
191
- f"indexing is supported only on the model's predictions inside the integration test. Please remove this indexing operation usage from the integration test code.")
77
+ f'Please remove this indexing operation usage from the integration test code.')
192
78
  else:
193
79
  print(f'Invalid integration code. File {file_name}, line {line_number}: '
194
80
  f'Integration test is only allowed to call Tensorleap decorators. '
@@ -200,8 +86,8 @@ def tensorleap_integration_test():
200
86
  _called_from_inside_tl_integration_test_decorator = False
201
87
 
202
88
  leap_binder.check()
203
- return inner
204
89
 
90
+ return inner
205
91
 
206
92
  return decorating_function
207
93
 
@@ -211,63 +97,34 @@ def _safe_get_item(key):
211
97
  except ValueError:
212
98
  raise Exception(f'Tensorleap currently supports models with no more then 10 inputs')
213
99
 
100
+
214
101
  def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]] = []):
215
- assert isinstance(prediction_types, list),(
216
- f"tensorleap_load_model validation failed: "
217
- f" prediction_types is an optional argument of type List[PredictionTypeHandler]] but got {type(prediction_types).__name__}."
218
- )
219
102
  for i, prediction_type in enumerate(prediction_types):
220
- assert isinstance(prediction_type, PredictionTypeHandler),(f"tensorleap_load_model validation failed: "
221
- f" prediction_types at position {i} must be of type PredictionTypeHandler but got {type(prediction_types[i]).__name__}.")
222
103
  leap_binder.add_prediction(prediction_type.name, prediction_type.labels, prediction_type.channel_dim, i)
223
104
 
224
- def _validate_result(result) -> None:
225
- valid_types=["onnxruntime","keras"]
226
- err_message=f"tensorleap_load_model validation failed:\nSupported models are Keras and onnxruntime only and non of them was returned."
227
- validate_output_structure(result, func_name="tensorleap_load_model", expected_type_name= [" | ".join(t for t in valid_types)][0])
228
- try:
229
- import keras
230
- except ImportError:
231
- keras = None
232
- try:
233
- import tensorflow as tf
234
- except ImportError:
235
- tf = None
236
- try:
237
- import onnxruntime
238
- except ImportError:
239
- onnxruntime = None
240
-
241
- if not keras and not onnxruntime:
242
- raise AssertionError(err_message)
243
-
244
- is_keras_model = (
245
- bool(keras and isinstance(result, getattr(keras, "Model", tuple())))
246
- or bool(tf and isinstance(result, getattr(tf.keras, "Model", tuple())))
247
- )
248
- is_onnx_model = bool(onnxruntime and isinstance(result, onnxruntime.InferenceSession))
249
-
250
- if not any([is_keras_model, is_onnx_model]):
251
- raise AssertionError( err_message)
252
-
253
-
254
-
255
105
  def decorating_function(load_model_func):
256
106
  class TempMapping:
257
107
  pass
258
108
 
259
109
  @lru_cache()
260
- def inner(*args, **kwargs):
261
- validate_args_structure(*args, types_order=[],
262
- func_name='tensorleap_load_model',expected_names=[],**kwargs)
110
+ def inner():
263
111
  class ModelPlaceholder:
264
112
  def __init__(self):
265
- self.model = load_model_func() #TODO- check why this fails on onnx model
266
- _validate_result(self.model)
113
+ self.model = load_model_func()
114
+ # Emit integration test event once per test
115
+ try:
116
+ emit_integration_event_once(AnalyticsEvent.LOAD_MODEL_INTEGRATION_TEST, {
117
+ 'prediction_types_count': len(prediction_types)
118
+ })
119
+ except Exception as e:
120
+ logger.debug(f"Failed to emit load_model integration test event: {e}")
267
121
 
268
122
  # keras interface
269
123
  def __call__(self, arg):
270
124
  ret = self.model(arg)
125
+ if isinstance(ret, list or tuple):
126
+ return [r.numpy() for r in ret]
127
+
271
128
  return ret.numpy()
272
129
 
273
130
  def _convert_onnx_inputs_to_correct_type(
@@ -327,10 +184,8 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]
327
184
 
328
185
  def get_inputs(self):
329
186
  return self.model.get_inputs()
330
- model_placeholder=ModelPlaceholder()
331
- if not _update_env_status is None:
332
- _update_env_status("tensorleap_load_model", "v")
333
- return model_placeholder
187
+
188
+ return ModelPlaceholder()
334
189
 
335
190
  def mapping_inner():
336
191
  class ModelOutputPlaceholder:
@@ -393,11 +248,12 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]
393
248
 
394
249
  return ModelPlaceholder()
395
250
 
396
- def final_inner(*args, **kwargs):
251
+ def final_inner():
397
252
  if os.environ.get(mapping_runtime_mode_env_var_mame):
398
253
  return mapping_inner()
399
254
  else:
400
- return inner(*args, **kwargs)
255
+ return inner()
256
+
401
257
  return final_inner
402
258
 
403
259
  return decorating_function
@@ -408,168 +264,81 @@ def tensorleap_custom_metric(name: str,
408
264
  compute_insights: Optional[Union[bool, Dict[str, bool]]] = None,
409
265
  connects_to=None):
410
266
  name_to_unique_name = defaultdict(set)
267
+
411
268
  def decorating_function(
412
269
  user_function: Union[CustomCallableInterfaceMultiArgs, CustomMultipleReturnCallableInterfaceMultiArgs,
413
270
  ConfusionMatrixCallableInterfaceMultiArgs]):
414
-
415
- def _validate_decorators_signature():
416
- err_message = f"{user_function.__name__} validation failed.\n"
417
- if not isinstance(name, str):
418
- raise TypeError(err_message + f"`name` must be a string, got type {type(name).__name__}.")
419
- valid_directions = {MetricDirection.Upward, MetricDirection.Downward}
420
- if isinstance(direction, MetricDirection):
421
- if direction not in valid_directions:
422
- raise ValueError(
423
- err_message +
424
- f"Invalid MetricDirection: {direction}. Must be one of {valid_directions}, "
425
- f"got type {type(direction).__name__}."
426
- )
427
- elif isinstance(direction, dict):
428
- if not all(isinstance(k, str) for k in direction.keys()):
429
- invalid_keys = {k: type(k).__name__ for k in direction.keys() if not isinstance(k, str)}
430
- raise TypeError(
431
- err_message +
432
- f"All keys in `direction` must be strings, got invalid key types: {invalid_keys}."
433
- )
434
- for k, v in direction.items():
435
- if v not in valid_directions:
436
- raise ValueError(
437
- err_message +
438
- f"Invalid direction for key '{k}': {v}. Must be one of {valid_directions}, "
439
- f"got type {type(v).__name__}."
440
- )
441
- else:
442
- raise TypeError(
443
- err_message +
444
- f"`direction` must be a MetricDirection or a Dict[str, MetricDirection], "
445
- f"got type {type(direction).__name__}."
446
- )
447
- if compute_insights is not None:
448
- if not isinstance(compute_insights, (bool, dict)):
449
- raise TypeError(
450
- err_message +
451
- f"`compute_insights` must be a bool or a Dict[str, bool], "
452
- f"got type {type(compute_insights).__name__}."
453
- )
454
- if isinstance(compute_insights, dict):
455
- if not all(isinstance(k, str) for k in compute_insights.keys()):
456
- invalid_keys = {k: type(k).__name__ for k in compute_insights.keys() if not isinstance(k, str)}
457
- raise TypeError(
458
- err_message +
459
- f"All keys in `compute_insights` must be strings, got invalid key types: {invalid_keys}."
460
- )
461
- for k, v in compute_insights.items():
462
- if not isinstance(v, bool):
463
- raise TypeError(
464
- err_message +
465
- f"Invalid type for compute_insights['{k}']: expected bool, got type {type(v).__name__}."
466
- )
467
- if connects_to is not None:
468
- valid_types = (str, list, tuple, set)
469
- if not isinstance(connects_to, valid_types):
470
- raise TypeError(
471
- err_message +
472
- f"`connects_to` must be one of {valid_types}, got type {type(connects_to).__name__}."
473
- )
474
- if isinstance(connects_to, (list, tuple, set)):
475
- invalid_elems = [f"{type(e).__name__}" for e in connects_to if not isinstance(e, str)]
476
- if invalid_elems:
477
- raise TypeError(
478
- err_message +
479
- f"All elements in `connects_to` must be strings, "
480
- f"but found element types: {invalid_elems}."
481
- )
482
-
483
-
484
- _validate_decorators_signature()
485
-
486
271
  for metric_handler in leap_binder.setup_container.metrics:
487
272
  if metric_handler.metric_handler_data.name == name:
488
273
  raise Exception(f'Metric with name {name} already exists. '
489
274
  f'Please choose another')
490
275
 
491
276
  def _validate_input_args(*args, **kwargs) -> None:
492
- assert len(args) > 0, (
493
- f"{user_function.__name__}() validation failed: "
494
- f"Expected at least one positional|key-word argument of type np.ndarray, "
495
- f"but received none. "
496
- f"Correct usage example: tensorleap_custom_metric(input_array: np.ndarray, ...)"
497
- )
498
277
  for i, arg in enumerate(args):
499
278
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
500
- f'{user_function.__name__}() validation failed: '
279
+ f'tensorleap_custom_metric validation failed: '
501
280
  f'Argument #{i} should be a numpy array. Got {type(arg)}.')
502
281
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
503
282
  assert arg.shape[0] == leap_binder.batch_size_to_validate, \
504
- (f'{user_function.__name__}() validation failed: Argument #{i} '
283
+ (f'tensorleap_custom_metric validation failed: Argument #{i} '
505
284
  f'first dim should be as the batch size. Got {arg.shape[0]} '
506
285
  f'instead of {leap_binder.batch_size_to_validate}')
507
286
 
508
287
  for _arg_name, arg in kwargs.items():
509
288
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
510
- f'{user_function.__name__}() validation failed: '
289
+ f'tensorleap_custom_metric validation failed: '
511
290
  f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
512
291
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
513
292
  assert arg.shape[0] == leap_binder.batch_size_to_validate, \
514
- (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
293
+ (f'tensorleap_custom_metric validation failed: Argument {_arg_name} '
515
294
  f'first dim should be as the batch size. Got {arg.shape[0]} '
516
295
  f'instead of {leap_binder.batch_size_to_validate}')
517
296
 
518
297
  def _validate_result(result) -> None:
519
- validate_output_structure(result, func_name=user_function.__name__,
520
- expected_type_name="List[float | int | None | List[ConfusionMatrixElement] ] | NDArray[np.float32] or dictonary with one of these types as its values types")
521
- supported_types_message = (f'{user_function.__name__}() validation failed: '
522
- f'{user_function.__name__}() has returned unsupported type.\nSupported types are List[float|int|None], '
523
- f'List[List[ConfusionMatrixElement]], NDArray[np.float32] or dictonary with one of these types as its values types. ')
298
+ supported_types_message = (f'tensorleap_custom_metric validation failed: '
299
+ f'Metric has returned unsupported type. Supported types are List[float], '
300
+ f'List[List[ConfusionMatrixElement]], NDArray[np.float32]. ')
524
301
 
525
- def _validate_single_metric(single_metric_result,key=None):
302
+ def _validate_single_metric(single_metric_result):
526
303
  if isinstance(single_metric_result, list):
527
304
  if isinstance(single_metric_result[0], list):
528
- assert all(isinstance(cm, ConfusionMatrixElement) for cm in single_metric_result[0]), (
529
- f"{supported_types_message} "
530
- f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
531
- f"List[List[{', '.join(type(cm).__name__ for cm in single_metric_result[0])}]]."
532
- )
533
-
305
+ assert isinstance(single_metric_result[0][0], ConfusionMatrixElement), \
306
+ f'{supported_types_message}Got List[List[{type(single_metric_result[0][0])}]].'
534
307
  else:
535
- assert all(isinstance(v, (float,int,type(None),np.float32)) for v in single_metric_result), (
536
- f"{supported_types_message}\n"
537
- f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
538
- f"List[{', '.join(type(v).__name__ for v in single_metric_result)}]."
539
- )
308
+ assert isinstance(single_metric_result[0], (
309
+ float, int,
310
+ type(None))), f'{supported_types_message}Got List[{type(single_metric_result[0])}].'
540
311
  else:
541
312
  assert isinstance(single_metric_result,
542
- np.ndarray), f'{supported_types_message}\nGot {type(single_metric_result)}.'
543
- assert len(single_metric_result.shape) == 1, (f'{user_function.__name__}() validation failed: '
313
+ np.ndarray), f'{supported_types_message}Got {type(single_metric_result)}.'
314
+ assert len(single_metric_result.shape) == 1, (f'tensorleap_custom_metric validation failed: '
544
315
  f'The return shape should be 1D. Got {len(single_metric_result.shape)}D.')
545
316
 
546
317
  if leap_binder.batch_size_to_validate:
547
318
  assert len(single_metric_result) == leap_binder.batch_size_to_validate, \
548
- f'{user_function.__name__}() validation failed: The return len {f"of srt{key} value" if key is not None else ""} should be as the batch size.'
319
+ f'tensorleap_custom_metrix validation failed: The return len should be as the batch size.'
549
320
 
550
321
  if isinstance(result, dict):
551
322
  for key, value in result.items():
552
- _validate_single_metric(value,key)
553
-
554
323
  assert isinstance(key, str), \
555
- (f'{user_function.__name__}() validation failed: '
324
+ (f'tensorleap_custom_metric validation failed: '
556
325
  f'Keys in the return dict should be of type str. Got {type(key)}.')
557
326
  _validate_single_metric(value)
558
327
 
559
328
  if isinstance(direction, dict):
560
329
  for direction_key in direction:
561
330
  assert direction_key in result, \
562
- (f'{user_function.__name__}() validation failed: '
331
+ (f'tensorleap_custom_metric validation failed: '
563
332
  f'Keys in the direction mapping should be part of result keys. Got key {direction_key}.')
564
333
 
565
334
  if compute_insights is not None:
566
335
  assert isinstance(compute_insights, dict), \
567
- (f'{user_function.__name__}() validation failed: '
336
+ (f'tensorleap_custom_metric validation failed: '
568
337
  f'compute_insights should be dict if using the dict results. Got {type(compute_insights)}.')
569
338
 
570
339
  for ci_key in compute_insights:
571
340
  assert ci_key in result, \
572
- (f'{user_function.__name__}() validation failed: '
341
+ (f'tensorleap_custom_metric validation failed: '
573
342
  f'Keys in the compute_insights mapping should be part of result keys. Got key {ci_key}.')
574
343
 
575
344
  else:
@@ -577,7 +346,7 @@ def tensorleap_custom_metric(name: str,
577
346
 
578
347
  if compute_insights is not None:
579
348
  assert isinstance(compute_insights, bool), \
580
- (f'{user_function.__name__}() validation failed: '
349
+ (f'tensorleap_custom_metric validation failed: '
581
350
  f'compute_insights should be boolean. Got {type(compute_insights)}.')
582
351
 
583
352
  @functools.wraps(user_function)
@@ -609,8 +378,6 @@ def tensorleap_custom_metric(name: str,
609
378
  result = inner_without_validate(*args, **kwargs)
610
379
 
611
380
  _validate_result(result)
612
- if not _update_env_status is None:
613
- _update_env_status("tensorleap_custom_metric","v")
614
381
  return result
615
382
 
616
383
  def mapping_inner(*args, **kwargs):
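Per the validation kept in the hunk above, a custom metric must return one value per sample: a List[float], a List[List[ConfusionMatrixElement]], a 1-D NDArray[np.float32], or a dict of those keyed by metric name, with length equal to the batch size. A minimal sketch consistent with those rules (the metric body is illustrative, not taken from the package):

    import numpy as np
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_metric

    @tensorleap_custom_metric(name='mse')
    def mse_metric(prediction: np.ndarray, gt: np.ndarray) -> np.ndarray:
        # Reduce every axis except the batch axis: one float32 score per sample.
        reduce_axes = tuple(range(1, prediction.ndim))
        return np.mean((prediction - gt) ** 2, axis=reduce_axes).astype(np.float32)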
@@ -650,38 +417,28 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
650
417
  name_to_unique_name = defaultdict(set)
651
418
 
652
419
  def decorating_function(user_function: VisualizerCallableInterface):
653
- assert isinstance(visualizer_type,LeapDataType),(f"{user_function.__name__} validation failed: "
654
- f"visualizer_type should be of type {LeapDataType.__name__} but got {type(visualizer_type)}"
655
- )
656
-
657
420
  for viz_handler in leap_binder.setup_container.visualizers:
658
421
  if viz_handler.visualizer_handler_data.name == name:
659
422
  raise Exception(f'Visualizer with name {name} already exists. '
660
423
  f'Please choose another')
661
424
 
662
425
  def _validate_input_args(*args, **kwargs):
663
- assert len(args) > 0, (
664
- f"{user_function.__name__}() validation failed: "
665
- f"Expected at least one positional|key-word argument of type np.ndarray, "
666
- f"but received none. "
667
- f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
668
- )
669
426
  for i, arg in enumerate(args):
670
427
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
671
- f'{user_function.__name__}() validation failed: '
428
+ f'tensorleap_custom_visualizer validation failed: '
672
429
  f'Argument #{i} should be a numpy array. Got {type(arg)}.')
673
430
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
674
431
  assert arg.shape[0] != leap_binder.batch_size_to_validate, \
675
- (f'{user_function.__name__}() validation failed: '
432
+ (f'tensorleap_custom_visualizer validation failed: '
676
433
  f'Argument #{i} should be without batch dimension. ')
677
434
 
678
435
  for _arg_name, arg in kwargs.items():
679
436
  assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
680
- f'{user_function.__name__}() validation failed: '
437
+ f'tensorleap_custom_visualizer validation failed: '
681
438
  f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
682
439
  if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
683
440
  assert arg.shape[0] != leap_binder.batch_size_to_validate, \
684
- (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
441
+ (f'tensorleap_custom_visualizer validation failed: Argument {_arg_name} '
685
442
  f'should be without batch dimension. ')
686
443
 
687
444
  def _validate_result(result):
@@ -695,11 +452,8 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
695
452
  LeapDataType.ImageWithBBox: LeapImageWithBBox,
696
453
  LeapDataType.ImageWithHeatmap: LeapImageWithHeatmap
697
454
  }
698
- validate_output_structure(result, func_name=user_function.__name__,
699
- expected_type_name=result_type_map[visualizer_type])
700
-
701
455
  assert isinstance(result, result_type_map[visualizer_type]), \
702
- (f'{user_function.__name__}() validation failed: '
456
+ (f'tensorleap_custom_visualizer validation failed: '
703
457
  f'The return type should be {result_type_map[visualizer_type]}. Got {type(result)}.')
704
458
 
705
459
  @functools.wraps(user_function)
@@ -731,8 +485,6 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
731
485
  result = inner_without_validate(*args, **kwargs)
732
486
 
733
487
  _validate_result(result)
734
- if not _update_env_status is None:
735
- _update_env_status("tensorleap_custom_visualizer","v")
736
488
  return result
737
489
 
738
490
  def mapping_inner(*args, **kwargs):
@@ -774,26 +526,30 @@ def tensorleap_metadata(
774
526
  f'Please choose another')
775
527
 
776
528
  def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
529
+ assert isinstance(sample_id, (int, str)), \
530
+ (f'tensorleap_metadata validation failed: '
531
+ f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
532
+ assert isinstance(preprocess_response, PreprocessResponse), \
533
+ (f'tensorleap_metadata validation failed: '
534
+ f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
777
535
  assert type(sample_id) == preprocess_response.sample_id_type, \
778
- (f'{user_function.__name__}() validation failed: '
536
+ (f'tensorleap_metadata validation failed: '
779
537
  f'Argument sample_id should be as the same type as defined in the preprocess response '
780
538
  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')
781
539
 
782
540
  def _validate_result(result):
783
541
  supported_result_types = (type(None), int, str, bool, float, dict, np.floating,
784
542
  np.bool_, np.unsignedinteger, np.signedinteger, np.integer)
785
- validate_output_structure(result, func_name=user_function.__name__,
786
- expected_type_name=supported_result_types)
787
543
  assert isinstance(result, supported_result_types), \
788
- (f'{user_function.__name__}() validation failed: '
544
+ (f'tensorleap_metadata validation failed: '
789
545
  f'Unsupported return type. Got {type(result)}. should be any of {str(supported_result_types)}')
790
546
  if isinstance(result, dict):
791
547
  for key, value in result.items():
792
548
  assert isinstance(key, str), \
793
- (f'{user_function.__name__}() validation failed: '
549
+ (f'tensorleap_metadata validation failed: '
794
550
  f'Keys in the return dict should be of type str. Got {type(key)}.')
795
551
  assert isinstance(value, supported_result_types), \
796
- (f'{user_function.__name__}() validation failed: '
552
+ (f'tensorleap_metadata validation failed: '
797
553
  f'Values in the return dict should be of type {str(supported_result_types)}. Got {type(value)}.')
798
554
 
799
555
  def inner_without_validate(sample_id, preprocess_response):
@@ -810,19 +566,15 @@ def tensorleap_metadata(
810
566
 
811
567
  leap_binder.set_metadata(inner_without_validate, name, metadata_type)
812
568
 
813
- def inner(*args,**kwargs):
569
+ def inner(sample_id, preprocess_response):
814
570
  if os.environ.get(mapping_runtime_mode_env_var_mame):
815
571
  return None
816
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
817
- func_name=user_function.__name__, expected_names=["idx", "preprocess"],**kwargs)
818
- sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
572
+
819
573
  _validate_input_args(sample_id, preprocess_response)
820
574
 
821
575
  result = inner_without_validate(sample_id, preprocess_response)
822
576
 
823
577
  _validate_result(result)
824
- if not _update_env_status is None:
825
- _update_env_status("tensorleap_metadata","v")
826
578
  return result
827
579
 
828
580
  return inner
@@ -885,23 +637,19 @@ def tensorleap_preprocess():
885
637
 
886
638
  def _validate_input_args(*args, **kwargs):
887
639
  assert len(args) == 0 and len(kwargs) == 0, \
888
- (f'{user_function.__name__}() validation failed: '
640
+ (f'tensorleap_preprocess validation failed: '
889
641
  f'The function should not take any arguments. Got {args} and {kwargs}.')
890
642
 
891
643
  def _validate_result(result):
892
- assert isinstance(result, list), (
893
- f"{user_function.__name__}() validation failed: expected return type list[{PreprocessResponse.__name__}]"
894
- f"(e.g., [PreprocessResponse1, PreprocessResponse2, ...]), but returned type is {type(result).__name__}."
895
- if not isinstance(result, tuple)
896
- else f"{user_function.__name__}() validation failed: expected to return a single list[{PreprocessResponse.__name__}] object, "
897
- f"but returned {len(result)} objects instead."
898
- )
644
+ assert isinstance(result, list), \
645
+ (f'tensorleap_preprocess validation failed: '
646
+ f'The return type should be a list. Got {type(result)}.')
899
647
  for i, response in enumerate(result):
900
648
  assert isinstance(response, PreprocessResponse), \
901
- (f'{user_function.__name__}() validation failed: '
649
+ (f'tensorleap_preprocess validation failed: '
902
650
  f'Element #{i} in the return list should be a PreprocessResponse. Got {type(response)}.')
903
651
  assert len(set(result)) == len(result), \
904
- (f'{user_function.__name__}() validation failed: '
652
+ (f'tensorleap_preprocess validation failed: '
905
653
  f'The return list should not contain duplicate PreprocessResponse objects.')
906
654
 
907
655
  def inner(*args, **kwargs):
@@ -909,10 +657,18 @@ def tensorleap_preprocess():
  return [None, None, None, None]

  _validate_input_args(*args, **kwargs)
+
  result = user_function()
  _validate_result(result)
- if not _update_env_status is None:
- _update_env_status("tensorleap_preprocess", "v")
+
+ # Emit integration test event once per test
+ try:
+ emit_integration_event_once(AnalyticsEvent.PREPROCESS_INTEGRATION_TEST, {
+ 'preprocess_responses_count': len(result)
+ })
+ except Exception as e:
+ logger.debug(f"Failed to emit preprocess integration test event: {e}")
+
  return result

  return inner
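As validated above, the decorated preprocess function takes no arguments and returns a list of distinct PreprocessResponse objects; the decorator then emits PREPROCESS_INTEGRATION_TEST once with the response count. A sketch of a conforming user function, assuming the PreprocessResponse constructor accepts sample_ids and data keywords (the data payload is illustrative):

    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_preprocess

    @tensorleap_preprocess()
    def preprocess_func():
        # One response per split, each with its own string sample ids.
        train = PreprocessResponse(sample_ids=[str(i) for i in range(80)], data={})
        val = PreprocessResponse(sample_ids=[str(i) for i in range(80, 100)], data={})
        return [train, val]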
@@ -1111,23 +867,29 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
1111
867
  raise Exception(f"Channel dim for input {name} is expected to be either -1 or positive")
1112
868
 
1113
869
  def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
870
+ assert isinstance(sample_id, (int, str)), \
871
+ (f'tensorleap_input_encoder validation failed: '
872
+ f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
873
+ assert isinstance(preprocess_response, PreprocessResponse), \
874
+ (f'tensorleap_input_encoder validation failed: '
875
+ f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
1114
876
  assert type(sample_id) == preprocess_response.sample_id_type, \
1115
- (f'{user_function.__name__}() validation failed: '
877
+ (f'tensorleap_input_encoder validation failed: '
1116
878
  f'Argument sample_id should be as the same type as defined in the preprocess response '
1117
879
  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')
1118
880
 
1119
881
  def _validate_result(result):
1120
- validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray")
1121
882
  assert isinstance(result, np.ndarray), \
1122
- (f'{user_function.__name__}() validation failed: '
883
+ (f'tensorleap_input_encoder validation failed: '
1123
884
  f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
1124
885
  assert result.dtype == np.float32, \
1125
- (f'{user_function.__name__}() validation failed: '
886
+ (f'tensorleap_input_encoder validation failed: '
1126
887
  f'The return type should be a numpy array of type float32. Got {result.dtype}.')
1127
- assert channel_dim - 1 <= len(result.shape), (f'{user_function.__name__}() validation failed: '
888
+ assert channel_dim - 1 <= len(result.shape), (f'tensorleap_input_encoder validation failed: '
1128
889
  f'The channel_dim ({channel_dim}) should be <= to the rank of the resulting input rank ({len(result.shape)}).')
1129
890
 
1130
891
  def inner_without_validate(sample_id, preprocess_response):
892
+
1131
893
  global _called_from_inside_tl_decorator
1132
894
  _called_from_inside_tl_decorator += 1
1133
895
 
@@ -1141,10 +903,7 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
1141
903
  leap_binder.set_input(inner_without_validate, name, channel_dim=channel_dim)
1142
904
 
1143
905
 
1144
- def inner(*args, **kwargs):
1145
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
1146
- func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
1147
- sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
906
+ def inner(sample_id, preprocess_response):
1148
907
  _validate_input_args(sample_id, preprocess_response)
1149
908
 
1150
909
  result = inner_without_validate(sample_id, preprocess_response)
@@ -1152,20 +911,27 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
1152
911
  _validate_result(result)
1153
912
 
1154
913
  if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
1155
- batch_warning(result,user_function.__name__)
1156
914
  result = np.expand_dims(result, axis=0)
1157
- if not _update_env_status is None:
1158
- _update_env_status("tensorleap_input_encoder", "v")
915
+ # Emit integration test event once per test
916
+ try:
917
+ emit_integration_event_once(AnalyticsEvent.INPUT_ENCODER_INTEGRATION_TEST, {
918
+ 'encoder_name': name,
919
+ 'channel_dim': channel_dim,
920
+ 'model_input_index': model_input_index
921
+ })
922
+ except Exception as e:
923
+ logger.debug(f"Failed to emit input_encoder integration test event: {e}")
1159
924
 
1160
925
  return result
1161
926
 
1162
927
 
928
+
1163
929
  node_mapping_type = NodeMappingType.Input
1164
930
  if model_input_index is not None:
1165
931
  node_mapping_type = NodeMappingType(f'Input{str(model_input_index)}')
1166
932
  inner.node_mapping = NodeMapping(name, node_mapping_type)
1167
933
 
1168
- def mapping_inner(*args, **kwargs):
934
+ def mapping_inner(sample_id, preprocess_response):
1169
935
  class TempMapping:
1170
936
  pass
1171
937
 
@@ -1177,11 +943,11 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):

  mapping_inner.node_mapping = NodeMapping(name, node_mapping_type)

- def final_inner(*args, **kwargs):
+ def final_inner(sample_id, preprocess_response):
  if os.environ.get(mapping_runtime_mode_env_var_mame):
- return mapping_inner(*args, **kwargs)
+ return mapping_inner(sample_id, preprocess_response)
  else:
- return inner(*args, **kwargs)
+ return inner(sample_id, preprocess_response)

  final_inner.node_mapping = NodeMapping(name, node_mapping_type)
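The input-encoder hunks above require the decorated function to take (sample_id, preprocess_response) and return an unbatched np.ndarray of dtype float32; a batch dimension is added later when running inside the integration-test decorator. A minimal sketch consistent with those checks (the returned tensor and its shape are illustrative):

    import numpy as np
    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_input_encoder

    @tensorleap_input_encoder(name='image', channel_dim=-1)
    def input_encoder(sample_id: str, preprocess: PreprocessResponse) -> np.ndarray:
        # Single sample, no batch dimension, channels last, float32.
        return np.zeros((224, 224, 3), dtype=np.float32)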
@@ -1198,18 +964,23 @@ def tensorleap_gt_encoder(name: str):
1198
964
  f'Please choose another')
1199
965
 
1200
966
  def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
967
+ assert isinstance(sample_id, (int, str)), \
968
+ (f'tensorleap_gt_encoder validation failed: '
969
+ f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
970
+ assert isinstance(preprocess_response, PreprocessResponse), \
971
+ (f'tensorleap_gt_encoder validation failed: '
972
+ f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
1201
973
  assert type(sample_id) == preprocess_response.sample_id_type, \
1202
- (f'{user_function.__name__}() validation failed: '
974
+ (f'tensorleap_gt_encoder validation failed: '
1203
975
  f'Argument sample_id should be as the same type as defined in the preprocess response '
1204
976
  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')
1205
977
 
1206
978
  def _validate_result(result):
1207
- validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray",gt_flag=True)
1208
979
  assert isinstance(result, np.ndarray), \
1209
- (f'{user_function.__name__}() validation failed: '
980
+ (f'tensorleap_gt_encoder validation failed: '
1210
981
  f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
1211
982
  assert result.dtype == np.float32, \
1212
- (f'{user_function.__name__}() validation failed: '
983
+ (f'tensorleap_gt_encoder validation failed: '
1213
984
  f'The return type should be a numpy array of type float32. Got {result.dtype}.')
1214
985
 
1215
986
  def inner_without_validate(sample_id, preprocess_response):
@@ -1226,10 +997,7 @@ def tensorleap_gt_encoder(name: str):
1226
997
  leap_binder.set_ground_truth(inner_without_validate, name)
1227
998
 
1228
999
 
1229
- def inner(*args, **kwargs):
1230
- validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
1231
- func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
1232
- sample_id, preprocess_response = args
1000
+ def inner(sample_id, preprocess_response):
1233
1001
  _validate_input_args(sample_id, preprocess_response)
1234
1002
 
1235
1003
  result = inner_without_validate(sample_id, preprocess_response)
@@ -1237,15 +1005,20 @@ def tensorleap_gt_encoder(name: str):
1237
1005
  _validate_result(result)
1238
1006
 
1239
1007
  if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
1240
- batch_warning(result, user_function.__name__)
1241
1008
  result = np.expand_dims(result, axis=0)
1242
- if not _update_env_status is None:
1243
- _update_env_status("tensorleap_gt_encoder", "v")
1009
+ # Emit integration test event once per test
1010
+ try:
1011
+ emit_integration_event_once(AnalyticsEvent.GT_ENCODER_INTEGRATION_TEST, {
1012
+ 'encoder_name': name
1013
+ })
1014
+ except Exception as e:
1015
+ logger.debug(f"Failed to emit gt_encoder integration test event: {e}")
1016
+
1244
1017
  return result
1245
1018
 
1246
1019
  inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)
1247
1020
 
1248
- def mapping_inner(*args, **kwargs):
1021
+ def mapping_inner(sample_id, preprocess_response):
1249
1022
  class TempMapping:
1250
1023
  pass
1251
1024
 
@@ -1256,11 +1029,11 @@ def tensorleap_gt_encoder(name: str):
1256
1029
 
1257
1030
  mapping_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)
1258
1031
 
1259
- def final_inner(*args, **kwargs):
1032
+ def final_inner(sample_id, preprocess_response):
1260
1033
  if os.environ.get(mapping_runtime_mode_env_var_mame):
1261
- return mapping_inner(*args, **kwargs)
1034
+ return mapping_inner(sample_id, preprocess_response)
1262
1035
  else:
1263
- return inner(*args, **kwargs)
1036
+ return inner(sample_id, preprocess_response)
1264
1037
 
1265
1038
  final_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)
1266
1039
 
@@ -1281,37 +1054,28 @@ def tensorleap_custom_loss(name: str, connects_to=None):
1281
1054
  valid_types = (np.ndarray, SamplePreprocessResponse)
1282
1055
 
1283
1056
  def _validate_input_args(*args, **kwargs):
1284
- assert len(args) > 0 and len(kwargs)==0, (
1285
- f"{user_function.__name__}() validation failed: "
1286
- f"Expected at least one positional|key-word argument of the allowed types (np.ndarray|SamplePreprocessResponse|list(np.ndarray|SamplePreprocessResponse)). "
1287
- f"but received none. "
1288
- f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
1289
- )
1290
1057
  for i, arg in enumerate(args):
1291
1058
  if isinstance(arg, list):
1292
1059
  for y, elem in enumerate(arg):
1293
- assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
1060
+ assert isinstance(elem, valid_types), (f'tensorleap_custom_loss validation failed: '
1294
1061
  f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
1295
1062
  else:
1296
- assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
1063
+ assert isinstance(arg, valid_types), (f'tensorleap_custom_loss validation failed: '
1297
1064
  f'Argument #{i} should be a numpy array. Got {type(arg)}.')
1298
1065
  for _arg_name, arg in kwargs.items():
1299
1066
  if isinstance(arg, list):
1300
1067
  for y, elem in enumerate(arg):
1301
- assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
1068
+ assert isinstance(elem, valid_types), (f'tensorleap_custom_loss validation failed: '
1302
1069
  f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
1303
1070
  else:
1304
- assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
1071
+ assert isinstance(arg, valid_types), (f'tensorleap_custom_loss validation failed: '
1305
1072
  f'Argument #{_arg_name} should be a numpy array. Got {type(arg)}.')
1306
1073
 
1307
1074
  def _validate_result(result):
1308
- validate_output_structure(result, func_name=user_function.__name__,
1309
- expected_type_name="np.ndarray")
1310
1075
  assert isinstance(result, np.ndarray), \
1311
- (f'{user_function.__name__} validation failed: '
1076
+ (f'tensorleap_custom_loss validation failed: '
1312
1077
  f'The return type should be a numpy array. Got {type(result)}.')
1313
- assert result.ndim<2 ,(f'{user_function.__name__} validation failed: '
1314
- f'The return type should be a 1Dim numpy array but got {result.ndim}Dim.')
1078
+
1315
1079
 
1316
1080
  @functools.wraps(user_function)
1317
1081
  def inner_without_validate(*args, **kwargs):
@@ -1342,9 +1106,6 @@ def tensorleap_custom_loss(name: str, connects_to=None):
1342
1106
  result = inner_without_validate(*args, **kwargs)
1343
1107
 
1344
1108
  _validate_result(result)
1345
- if not _update_env_status is None:
1346
- _update_env_status("tensorleap_custom_loss", "v")
1347
-
1348
1109
  return result
1349
1110
 
1350
1111
  def mapping_inner(*args, **kwargs):
@@ -1401,94 +1162,3 @@ def tensorleap_custom_layer(name: str):
1401
1162
  return custom_layer
1402
1163
 
1403
1164
  return decorating_function
1404
-
1405
-
1406
- def tensorleap_status_table():
1407
- '''
1408
- Usage example:
1409
- ###################
1410
- leap_integration.py
1411
- ###################
1412
- from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_status_table
1413
- ...
1414
- ...
1415
- ...
1416
- if __name__ == '__main__':
1417
- tensorleap_status_table()
1418
- ...
1419
- '''
1420
- import atexit
1421
- import sys
1422
- import traceback
1423
- CHECK = "✅"
1424
- CROSS = "❌"
1425
-
1426
- table = [
1427
- {"name": "tensorleap_preprocess", "Added to integration": CROSS},
1428
- {"name": "tensorleap_integration_test", "Added to integration": CROSS},
1429
- {"name": "tensorleap_input_encoder", "Added to integration": CROSS},
1430
- {"name": "tensorleap_gt_encoder", "Added to integration": CROSS},
1431
- {"name": "tensorleap_load_model", "Added to integration": CROSS},
1432
- {"name": "tensorleap_custom_loss", "Added to integration": CROSS},
1433
- {"name": "tensorleap_custom_metric (optional)", "Added to integration": CROSS},
1434
- {"name": "tensorleap_metadata (optional)", "Added to integration": CROSS},
1435
- {"name": "tensorleap_custom_visualizer (optional)", "Added to integration": CROSS},
1436
-
1437
- ]
1438
-
1439
- _finalizer_called = {"done": False}
1440
-
1441
- def _remove_suffix(s: str, suffix: str) -> str:
1442
- #This is needed because str.remove_suffix was presented in python3.9+
1443
- if suffix and s.endswith(suffix):
1444
- return s[:-len(suffix)]
1445
- return s
1446
-
1447
- def _print_table():
1448
- ready_mess = "\nAll parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system."
1449
- not_ready_mess = "\nSome mandatory components have not yet been added to the Integration test. Recommended next interface to add is: "
1450
- mandatory_ready_mess = "\nAll mandatory parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system or continue to the next optional reccomeded interface,adding: "
1451
-
1452
- name_width = max(len(row["name"]) for row in table)
1453
- status_width = max(len(row["Added to integration"]) for row in table)
1454
- header = f"{'Decorator Name'.ljust(name_width)} | {'Added to integration'.ljust(status_width)}"
1455
- sep = "-" * len(header)
1456
- print("\n" + header)
1457
- print(sep)
1458
- ready=True
1459
- for row in table:
1460
- print(f"{row['name'].ljust(name_width)} | {row['Added to integration'].ljust(status_width)}")
1461
- if row['Added to integration']==CROSS and ready:
1462
- ready=False
1463
- next_step=row['name']
1464
-
1465
-
1466
- print(ready_mess) if ready else print(mandatory_ready_mess+next_step) if "optional" in next_step else print(not_ready_mess+next_step)
1467
- def update_env_params(name: str, status: str = "✓"):
1468
- for row in table:
1469
- if _remove_suffix(row["name"]," (optional)") == name:
1470
- row["Added to integration"] = CHECK if status=="v" else CROSS
1471
- break
1472
- def run_on_exit():
1473
- if _finalizer_called["done"]:
1474
- return
1475
- _finalizer_called["done"] = True
1476
- _print_table()
1477
- def handle_exception(exc_type, exc_value, exc_traceback):
1478
- traceback.print_exception(exc_type, exc_value, exc_traceback)
1479
- run_on_exit()
1480
- atexit.register(run_on_exit)
1481
- sys.excepthook = handle_exception
1482
- global _update_env_status
1483
- _update_env_status = update_env_params
1484
- return update_env_params
1485
-
1486
-
1487
-
1488
-
1489
-
1490
-
1491
-
1492
-
1493
-
1494
-
@@ -5,12 +5,53 @@ import os
  import sys
  import getpass
  import uuid
- from typing import Optional, Dict, Any
+ import logging
+ from enum import Enum
+ from typing import Optional, Dict, Any, Set, Union, TypedDict
  import mixpanel # type: ignore[import]

+ logger = logging.getLogger(__name__)
+
  TRACKING_VERSION = '1'


+ class AnalyticsEvent(str, Enum):
+ """Enumeration of all tracked analytics events."""
+ CODE_LOADER_LOADED = "code_loader_loaded"
+ LOAD_MODEL_INTEGRATION_TEST = "load_model_integration_test"
+ PREPROCESS_INTEGRATION_TEST = "preprocess_integration_test"
+ INPUT_ENCODER_INTEGRATION_TEST = "input_encoder_integration_test"
+ GT_ENCODER_INTEGRATION_TEST = "gt_encoder_integration_test"
+
+
+ class CodeLoaderLoadedProps(TypedDict, total=False):
+ """Properties for code_loader_loaded event."""
+ event_type: str
+ code_path: str
+ code_entry_name: str
+
+
+ class LoadModelEventProps(TypedDict, total=False):
+ """Properties for load_model_integration_test event."""
+ prediction_types_count: int
+
+
+ class PreprocessEventProps(TypedDict, total=False):
+ """Properties for preprocess_integration_test event."""
+ preprocess_responses_count: int
+
+
+ class InputEncoderEventProps(TypedDict, total=False):
+ """Properties for input_encoder_integration_test event."""
+ encoder_name: str
+ channel_dim: int
+
+
+ class GtEncoderEventProps(TypedDict, total=False):
+ """Properties for gt_encoder_integration_test event."""
+ encoder_name: str
+
+
  class MixpanelTracker:
  """Handles Mixpanel event tracking for code-loader."""
@@ -28,7 +69,8 @@ class MixpanelTracker:
28
69
  if self._user_id is None:
29
70
  try:
30
71
  self._user_id = getpass.getuser()
31
- except Exception:
72
+ except Exception as e:
73
+ logger.debug(f"Failed to get username via getpass: {e}")
32
74
  # Fallback to environment variables or default
33
75
  self._user_id = os.environ.get('USER', os.environ.get('USERNAME', 'unknown'))
34
76
  return self._user_id or 'unknown'
@@ -43,8 +85,8 @@ class MixpanelTracker:
43
85
  user_id = f.read().strip()
44
86
  if user_id:
45
87
  return user_id
46
- except Exception:
47
- pass
88
+ except Exception as e:
89
+ logger.debug(f"Failed to read TensorLeap user ID: {e}")
48
90
  return None
49
91
 
50
92
  def _get_or_create_device_id(self) -> str:
@@ -73,7 +115,8 @@ class MixpanelTracker:
73
115
  f.write(device_id)
74
116
 
75
117
  return device_id
76
- except Exception:
118
+ except Exception as e:
119
+ logger.debug(f"Failed to read/write device ID file: {e}")
77
120
  # Fallback to generating a new UUID if file operations fail
78
121
  return str(uuid.uuid4())
79
122
 
@@ -90,10 +133,11 @@ class MixpanelTracker:
90
133
 
91
134
  return self._get_or_create_device_id()
92
135
 
93
- def track_code_loader_loaded(self, event_properties: Optional[Dict[str, Any]] = None) -> None:
94
- """Track code loader loaded event with device identification.
136
+ def _track_event(self, event_name: Union[str, AnalyticsEvent], event_properties: Optional[Dict[str, Any]] = None) -> None:
137
+ """Internal method to track any event with device identification.
95
138
 
96
139
  Args:
140
+ event_name: The name of the event to track (string or AnalyticsEvent enum)
97
141
  event_properties: Optional additional properties to include in the event
98
142
  """
99
143
  # Skip tracking if IS_TENSORLEAP_PLATFORM environment variable is set to 'true'
@@ -122,9 +166,26 @@ class MixpanelTracker:
122
166
  if event_properties:
123
167
  properties.update(event_properties)
124
168
 
125
- self.mp.track(distinct_id, 'code_loader_loaded', properties)
169
+ self.mp.track(distinct_id, str(event_name), properties)
126
170
  except Exception as e:
127
- pass
171
+ logger.debug(f"Failed to track event '{event_name}': {e}")
172
+
173
+ def track_code_loader_loaded(self, event_properties: Optional[Dict[str, Any]] = None) -> None:
174
+ """Track code loader loaded event with device identification.
175
+
176
+ Args:
177
+ event_properties: Optional additional properties to include in the event
178
+ """
179
+ self._track_event(AnalyticsEvent.CODE_LOADER_LOADED, event_properties)
180
+
181
+ def track_integration_test_event(self, event_name: Union[str, AnalyticsEvent], event_properties: Optional[Dict[str, Any]] = None) -> None:
182
+ """Track an integration test event with device identification.
183
+
184
+ Args:
185
+ event_name: The name of the event to track (string or AnalyticsEvent enum)
186
+ event_properties: Optional additional properties to include in the event
187
+ """
188
+ self._track_event(event_name, event_properties)
128
189
 
129
190
 
130
191
  # Global tracker instance
@@ -140,3 +201,30 @@ def get_tracker() -> MixpanelTracker:

  def track_code_loader_loaded(event_properties: Optional[Dict[str, Any]] = None) -> None:
  get_tracker().track_code_loader_loaded(event_properties)
+
+
+ def track_integration_test_event(event_name: Union[str, AnalyticsEvent], event_properties: Optional[Dict[str, Any]] = None) -> None:
+ get_tracker().track_integration_test_event(event_name, event_properties)
+
+
+ # Module-level set to track which integration test events have been emitted
+ _integration_events_emitted: Set[str] = set()
+
+
+ def emit_integration_event_once(event_name: Union[str, AnalyticsEvent], props: Dict[str, Any]) -> None:
+ """Emit an integration test event only once per test run."""
+ event_name_str = str(event_name)
+ if event_name_str in _integration_events_emitted:
+ return
+
+ try:
+ track_integration_test_event(event_name, props)
+ _integration_events_emitted.add(event_name_str)
+ except Exception as e:
+ logger.debug(f"Failed to emit integration event once '{event_name}': {e}")
+
+
+ def clear_integration_events() -> None:
+ """Clear the integration events set for a new test run."""
+ global _integration_events_emitted
+ _integration_events_emitted.clear()
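Taken together, the decorator hunks call these helpers so that each analytics event fires at most once per integration-test run. A minimal sketch of the intended semantics using the names added above (the payload values are illustrative):

    from code_loader.mixpanel_tracker import (
        AnalyticsEvent, clear_integration_events, emit_integration_event_once)

    clear_integration_events()  # the integration-test decorator resets the set per run

    props = {'encoder_name': 'image', 'channel_dim': -1, 'model_input_index': None}
    emit_integration_event_once(AnalyticsEvent.INPUT_ENCODER_INTEGRATION_TEST, props)
    emit_integration_event_once(AnalyticsEvent.INPUT_ENCODER_INTEGRATION_TEST, props)  # no-op: already emitted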
@@ -324,7 +324,7 @@ def plot_image_mask(leap_data: LeapImageMask, title: str) -> None:

  # fill the instance mask with a translucent color
  overlayed_image[instance_mask] = (
- overlayed_image[instance_mask] * (1 - 0.5) + np.array(colors[i][:image.shape[-1]], dtype=np.uint8) * 0.5)
+ overlayed_image[instance_mask] * (1 - 0.5) + np.array(colors[i][:image.shape[-1]], dtype=image.dtype) * 0.5)

  # Display the result using matplotlib
  fig, ax = plt.subplots(1)
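The one-line change above casts the overlay color to the image's own dtype instead of a hard-coded np.uint8, so the 50/50 blend also behaves for non-uint8 images. A standalone sketch of the blend (array names and shapes are illustrative):

    import numpy as np

    image = np.zeros((4, 4, 3), dtype=np.float32)   # also works for uint8 images
    overlayed_image = image.copy()
    instance_mask = np.zeros((4, 4), dtype=bool)
    instance_mask[1:3, 1:3] = True
    color = (255, 0, 0)

    overlayed_image[instance_mask] = (
        overlayed_image[instance_mask] * (1 - 0.5)
        + np.array(color[:image.shape[-1]], dtype=image.dtype) * 0.5)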
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: code-loader
- Version: 1.0.139.dev9
+ Version: 1.0.141
  Summary:
  Home-page: https://github.com/tensorleap/code-loader
  License: MIT
@@ -1,7 +1,7 @@
  LICENSE,sha256=qIwWjdspQeSMTtnFZBC8MuT-95L02FPvzRUdWFxrwJY,1067
  code_loader/__init__.py,sha256=outxRQ0M-zMfV0QGVJmAed5qWfRmyD0TV6-goEGAzBw,406
  code_loader/contract/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- code_loader/contract/datasetclasses.py,sha256=hkHMU1upWzVWkjIBoU3r14XPDleicif4Oia2xfuHgvQ,9395
+ code_loader/contract/datasetclasses.py,sha256=u0gfDDy02skhFG3ejJOxqxCnykhAcBPGJfv8Bi4s9eQ,8966
  code_loader/contract/enums.py,sha256=GEFkvUMXnCNt-GOoz7NJ9ecQZ2PPDettJNOsxsiM0wk,1622
  code_loader/contract/exceptions.py,sha256=jWqu5i7t-0IG0jGRsKF4DjJdrsdpJjIYpUkN1F4RiyQ,51
  code_loader/contract/mapping.py,sha256=sWJhpng-IkOzQnWQdMT5w2ZZ3X1Z_OOzSwCLXIS7oxE,1446
@@ -21,17 +21,17 @@ code_loader/experiment_api/utils.py,sha256=XZHtxge12TS4H4-8PjV3sKuhp8Ud6ojAiIzTZ
  code_loader/experiment_api/workingspace_config_utils.py,sha256=DLzXQCg4dgTV_YgaSbeTVzq-2ja_SQw4zi7LXwKL9cY,990
  code_loader/inner_leap_binder/__init__.py,sha256=koOlJyMNYzGbEsoIbXathSmQ-L38N_pEXH_HvL7beXU,99
  code_loader/inner_leap_binder/leapbinder.py,sha256=Q3D9yVM-GNEJfYRFvMV__BoZbcWOgnWKhrZXAv6Tu7o,33232
- code_loader/inner_leap_binder/leapbinder_decorators.py,sha256=Trnl8PDQn_TDawx4KQJ14S_Y2DHhUdRNUvn4sSqbm-Q,70809
+ code_loader/inner_leap_binder/leapbinder_decorators.py,sha256=cQwk6sHC6il4eqhxv6rnM-abP7VTL2glYnXNeU26zq4,53943
  code_loader/leaploader.py,sha256=6D6xZzMI6qSNIb3tuKLB3BbK5H8QS1_r7iQjIXO3OkM,29795
  code_loader/leaploaderbase.py,sha256=LIFcC6xo6V_iiGN3BjibXETu_l84EWM_WIOKAvkfTiM,4458
- code_loader/mixpanel_tracker.py,sha256=l9z_szKKQ7apEbdNZpGH1TKAiT_TsBHb9AQnePaWTyo,4942
+ code_loader/mixpanel_tracker.py,sha256=eKvymkw7X2Ht6iw-a0V9VQm6OnB9kW7hYy35YtwRAvU,8457
  code_loader/plot_functions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- code_loader/plot_functions/plot_functions.py,sha256=VyVWxd7R3lALIo2z8oZlYybbN0Ip6G0OiKNTNZ77xHk,14557
+ code_loader/plot_functions/plot_functions.py,sha256=OGFLfbL31N2wuwcXIxxQ14f0Kuuvv1BZkAuFi2c0ma4,14560
  code_loader/plot_functions/visualize.py,sha256=gsBAYYkwMh7jIpJeDMPS8G4CW-pxwx6LznoQIvi4vpo,657
  code_loader/utils.py,sha256=gXENTYpjdidq2dx0gVbXlErPeHoNs-4TYAZbLRe0y2c,2712
  code_loader/visualizers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  code_loader/visualizers/default_visualizers.py,sha256=onRnLE_TXfgLN4o52hQIOOhUcFexGlqJ3xSpQDVLuZM,2604
- code_loader-1.0.139.dev9.dist-info/LICENSE,sha256=qIwWjdspQeSMTtnFZBC8MuT-95L02FPvzRUdWFxrwJY,1067
- code_loader-1.0.139.dev9.dist-info/METADATA,sha256=8EE51UpX81hCr1sE89et8K-1O4Y1-2wrQCr13Tc4nuk,1095
- code_loader-1.0.139.dev9.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- code_loader-1.0.139.dev9.dist-info/RECORD,,
+ code_loader-1.0.141.dist-info/LICENSE,sha256=qIwWjdspQeSMTtnFZBC8MuT-95L02FPvzRUdWFxrwJY,1067
+ code_loader-1.0.141.dist-info/METADATA,sha256=lsbN9TJFGKpD9hcJSu9-7DLnTRcCqSF8yJl24bi7Ku8,1090
+ code_loader-1.0.141.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ code_loader-1.0.141.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 1.9.1
+ Generator: poetry-core 1.9.0
  Root-Is-Purelib: true
  Tag: py3-none-any