code-loader 1.0.144__tar.gz → 1.0.145__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of code-loader might be problematic.
- {code_loader-1.0.144 → code_loader-1.0.145}/PKG-INFO +1 -1
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/contract/datasetclasses.py +9 -1
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/inner_leap_binder/leapbinder_decorators.py +471 -108
- {code_loader-1.0.144 → code_loader-1.0.145}/pyproject.toml +1 -1
- {code_loader-1.0.144 → code_loader-1.0.145}/LICENSE +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/README.md +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/__init__.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/contract/__init__.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/contract/enums.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/contract/exceptions.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/contract/mapping.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/contract/responsedataclasses.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/contract/visualizer_classes.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/default_losses.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/default_metrics.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/__init__.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/api.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/cli_config_utils.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/client.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/epoch.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/experiment.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/experiment_context.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/types.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/utils.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/experiment_api/workingspace_config_utils.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/inner_leap_binder/__init__.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/inner_leap_binder/leapbinder.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/leaploader.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/leaploaderbase.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/mixpanel_tracker.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/plot_functions/__init__.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/plot_functions/plot_functions.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/plot_functions/visualize.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/utils.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/visualizers/__init__.py +0 -0
- {code_loader-1.0.144 → code_loader-1.0.145}/code_loader/visualizers/default_visualizers.py +0 -0
{code_loader-1.0.144 → code_loader-1.0.145}/code_loader/contract/datasetclasses.py
RENAMED

@@ -1,3 +1,4 @@
+import warnings
 from dataclasses import dataclass, field
 from typing import Any, Callable, List, Optional, Dict, Union, Type
 import re
@@ -56,7 +57,14 @@ class PreprocessResponse:
             for sample_id in self.sample_ids:
                 assert isinstance(sample_id, str), f"Sample id should be of type str. Got: {type(sample_id)}"
         else:
-            raise Exception("length is deprecated.")
+            raise Exception("length is deprecated, please use sample_ids instead.")
+
+        if self.state is None:
+            warnings.warn(
+                "PreprocessResponse.state is not set. For best practice, assign a unique `state` value to each PreprocessResponse instance."
+            )
+        else:
+            assert isinstance(self.state, DataStateType), f"PreprocessResponse.state must be of type {DataStateType.__name__} but got {type(self.state)}"

     def __hash__(self) -> int:
         return id(self)
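Note (not part of the diff): with this change, a PreprocessResponse built without an explicit `state` triggers a runtime warning, and a non-DataStateType `state` fails an assertion. A minimal sketch of the intended construction — the keyword names (`sample_ids`, `data`, `state`) and the `DataStateType.training` member are assumptions based on this package's contract modules, so check the installed version for the exact signature:

    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.contract.enums import DataStateType

    # sample_ids must be strings; setting state explicitly avoids the new warning
    train_response = PreprocessResponse(
        sample_ids=[str(i) for i in range(1000)],
        data={'images_dir': '/path/to/train'},  # placeholder payload
        state=DataStateType.training,
    )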
{code_loader-1.0.144 → code_loader-1.0.145}/code_loader/inner_leap_binder/leapbinder_decorators.py
RENAMED
@@ -1,10 +1,12 @@
 # mypy: ignore-errors
 import os
+import warnings
 import logging
 from collections import defaultdict
 from functools import lru_cache
 from pathlib import Path
 from typing import Optional, Union, Callable, List, Dict, Set, Any
+from typing import Optional, Union, Callable, List, Dict, get_args, get_origin

 import numpy as np
 import numpy.typing as npt
@@ -29,8 +31,116 @@ import functools

 _called_from_inside_tl_decorator = 0
 _called_from_inside_tl_integration_test_decorator = False
-
-
+_call_from_tl_platform= os.environ.get('IS_TENSORLEAP_PLATFORM') == 'true'
+
+
+
+
+def validate_args_structure(*args, types_order, func_name, expected_names, **kwargs):
+    def _type_to_str(t):
+        origin = get_origin(t)
+        if origin is Union:
+            return " | ".join(tt.__name__ for tt in get_args(t))
+        elif hasattr(t, "__name__"):
+            return t.__name__
+        else:
+            return str(t)
+
+    def _format_types(types, names=None):
+        return ", ".join(
+            f"{(names[i] + ': ') if names else f'arg{i}: '}{_type_to_str(ty)}"
+            for i, ty in enumerate(types)
+        )
+
+    if expected_names:
+        normalized_args = []
+        for i, name in enumerate(expected_names):
+            if i < len(args):
+                normalized_args.append(args[i])
+            elif name in kwargs:
+                normalized_args.append(kwargs[name])
+            else:
+                raise AssertionError(
+                    f"{func_name} validation failed: "
+                    f"Missing required argument '{name}'. "
+                    f"Expected arguments: {expected_names}."
+                )
+    else:
+        normalized_args = list(args)
+    if len(normalized_args) != len(types_order):
+        expected = _format_types(types_order, expected_names)
+        got_types = ", ".join(type(arg).__name__ for arg in normalized_args)
+        raise AssertionError(
+            f"{func_name} validation failed: "
+            f"Expected exactly {len(types_order)} arguments ({expected}), "
+            f"but got {len(normalized_args)} argument(s) of type(s): ({got_types}). "
+            f"Correct usage example: {func_name}({expected})"
+        )
+
+    for i, (arg, expected_type) in enumerate(zip(normalized_args, types_order)):
+        origin = get_origin(expected_type)
+        if origin is Union:
+            allowed_types = get_args(expected_type)
+        else:
+            allowed_types = (expected_type,)
+
+        if not isinstance(arg, allowed_types):
+            allowed_str = " | ".join(t.__name__ for t in allowed_types)
+            raise AssertionError(
+                f"{func_name} validation failed: "
+                f"Argument '{expected_names[i] if expected_names else f'arg{i}'}' "
+                f"expected type {allowed_str}, but got {type(arg).__name__}. "
+                f"Correct usage example: {func_name}({_format_types(types_order, expected_names)})"
+            )
+
+
+def validate_output_structure(result, func_name: str, expected_type_name="np.ndarray",gt_flag=False):
+    if result is None or (isinstance(result, float) and np.isnan(result)):
+        if gt_flag:
+            raise AssertionError(
+                f"{func_name} validation failed: "
+                f"The function returned {result!r}. "
+                f"If you are working with an unlabeled dataset and no ground truth is available, "
+                f"use 'return np.array([], dtype=np.float32)' instead. "
+                f"Otherwise, {func_name} expected a single {expected_type_name} object. "
+                f"Make sure the function ends with 'return <{expected_type_name}>'."
+            )
+
+        raise AssertionError(
+            f"{func_name} validation failed: "
+            f"The function returned None. "
+            f"Expected a single {expected_type_name} object. "
+            f"Make sure the function ends with 'return <{expected_type_name}>'."
+        )
+    if isinstance(result, tuple):
+        element_descriptions = [
+            f"[{i}] type: {type(r).__name__}"
+            for i, r in enumerate(result)
+        ]
+        element_summary = "\n ".join(element_descriptions)
+
+        raise AssertionError(
+            f"{func_name} validation failed: "
+            f"The function returned multiple outputs ({len(result)} values), "
+            f"but only a single {expected_type_name} is allowed.\n\n"
+            f"Returned elements:\n"
+            f" {element_summary}\n\n"
+            f"Correct usage example:\n"
+            f" def {func_name}(...):\n"
+            f" return <{expected_type_name}>\n\n"
+            f"If you intended to return multiple values, combine them into a single "
+            f"{expected_type_name} (e.g., by concatenation or stacking)."
+        )
+
+def batch_warning(result, func_name):
+    if result.shape[0] == 1:
+        warnings.warn(
+            f"{func_name} warning: Tensorleap will add a batch dimension at axis 0 to the output of {func_name}, "
+            f"although the detected size of axis 0 is already 1. "
+            f"This may lead to an extra batch dimension (e.g., shape (1, 1, ...)). "
+            f"Please ensure that the output of '{func_name}' is not already batched "
+            f"to avoid computation errors."
+        )
 def _add_mapping_connection(user_unique_name, connection_destinations, arg_names, name, node_mapping_type):
     connection_destinations = [connection_destination for connection_destination in connection_destinations
                                if not isinstance(connection_destination, SamplePreprocessResponse)]
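Illustration (not part of the diff): the new validate_args_structure helper normalizes positional and keyword arguments against expected_names and then type-checks each one against types_order, accepting Union[...] entries. A small runnable sketch, assuming code-loader 1.0.145 is installed:

    from code_loader.inner_leap_binder.leapbinder_decorators import validate_args_structure

    # Passing a string where an int is expected raises AssertionError with a usage hint.
    try:
        validate_args_structure(
            "not-an-int",
            types_order=[int],
            func_name="example_func",   # hypothetical caller name, only used in the message
            expected_names=["idx"],
        )
    except AssertionError as err:
        print(err)  # example_func validation failed: Argument 'idx' expected type int, but got str. ...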
@@ -53,7 +163,19 @@ def tensorleap_integration_test():
     def decorating_function(integration_test_function: Callable):
         leap_binder.integration_test_func = integration_test_function

+        def _validate_input_args(*args, **kwargs):
+            sample_id,preprocess_response=args
+            assert type(sample_id) == preprocess_response.sample_id_type, (
+                f"tensorleap_integration_test validation failed: "
+                f"sample_id type ({type(sample_id).__name__}) does not match the expected "
+                f"type ({preprocess_response.sample_id_type}) from the PreprocessResponse."
+            )
+
         def inner(*args, **kwargs):
+            validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
+                                    func_name='integration_test',expected_names=["idx", "preprocess"],**kwargs)
+            _validate_input_args(*args, **kwargs)
+
             global _called_from_inside_tl_integration_test_decorator
             # Clear integration test events for new test
             try:
@@ -62,6 +184,8 @@ def tensorleap_integration_test():
                 logger.debug(f"Failed to clear integration events: {e}")
             try:
                 _called_from_inside_tl_integration_test_decorator = True
+                if not _call_from_tl_platform:
+                    update_env_params_func("tensorleap_integration_test", "v")#put here because otherwise it will become v only if it finishes all the script
                 ret = integration_test_function(*args, **kwargs)

                 try:
@@ -74,7 +198,7 @@ def tensorleap_integration_test():
                 line_number = first_tb.lineno
                 if isinstance(e, TypeError) and 'is not subscriptable' in str(e):
                     print(f'Invalid integration code. File {file_name}, line {line_number}: '
-                          f'Please remove this indexing operation usage from the integration test code.
+                          f"indexing is supported only on the model's predictions inside the integration test. Please remove this indexing operation usage from the integration test code.")
                 else:
                     print(f'Invalid integration code. File {file_name}, line {line_number}: '
                           f'Integration test is only allowed to call Tensorleap decorators. '
@@ -86,9 +210,9 @@ def tensorleap_integration_test():
                 _called_from_inside_tl_integration_test_decorator = False

             leap_binder.check()
-
         return inner

+
     return decorating_function

 def _safe_get_item(key):
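For reference (not part of the diff), the wrapper now validates the call as integration_test(idx, preprocess) before running user code, so a decorated test is expected to look roughly like this sketch (body elided):

    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_integration_test

    @tensorleap_integration_test()
    def integration_test(idx: str, preprocess: PreprocessResponse) -> None:
        # idx must be an int or str and must match preprocess.sample_id_type,
        # otherwise the new _validate_input_args assertion fires before the test body runs.
        ...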
@@ -97,34 +221,63 @@ def _safe_get_item(key):
     except ValueError:
         raise Exception(f'Tensorleap currently supports models with no more then 10 inputs')

-
 def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]] = []):
+    assert isinstance(prediction_types, list),(
+        f"tensorleap_load_model validation failed: "
+        f" prediction_types is an optional argument of type List[PredictionTypeHandler]] but got {type(prediction_types).__name__}."
+    )
     for i, prediction_type in enumerate(prediction_types):
+        assert isinstance(prediction_type, PredictionTypeHandler),(f"tensorleap_load_model validation failed: "
+            f" prediction_types at position {i} must be of type PredictionTypeHandler but got {type(prediction_types[i]).__name__}.")
         leap_binder.add_prediction(prediction_type.name, prediction_type.labels, prediction_type.channel_dim, i)

+    def _validate_result(result) -> None:
+        valid_types=["onnxruntime","keras"]
+        err_message=f"tensorleap_load_model validation failed:\nSupported models are Keras and onnxruntime only and non of them was returned."
+        validate_output_structure(result, func_name="tensorleap_load_model", expected_type_name= [" | ".join(t for t in valid_types)][0])
+        try:
+            import keras
+        except ImportError:
+            keras = None
+        try:
+            import tensorflow as tf
+        except ImportError:
+            tf = None
+        try:
+            import onnxruntime
+        except ImportError:
+            onnxruntime = None
+
+        if not keras and not onnxruntime:
+            raise AssertionError(err_message)
+
+        is_keras_model = (
+            bool(keras and isinstance(result, getattr(keras, "Model", tuple())))
+            or bool(tf and isinstance(result, getattr(tf.keras, "Model", tuple())))
+        )
+        is_onnx_model = bool(onnxruntime and isinstance(result, onnxruntime.InferenceSession))
+
+        if not any([is_keras_model, is_onnx_model]):
+            raise AssertionError( err_message)
+
+
+
     def decorating_function(load_model_func):
         class TempMapping:
             pass

         @lru_cache()
-        def inner():
+        def inner(*args, **kwargs):
+            validate_args_structure(*args, types_order=[],
+                                    func_name='tensorleap_load_model',expected_names=[],**kwargs)
             class ModelPlaceholder:
                 def __init__(self):
-                    self.model = load_model_func()
-
-                    try:
-                        emit_integration_event_once(AnalyticsEvent.LOAD_MODEL_INTEGRATION_TEST, {
-                            'prediction_types_count': len(prediction_types)
-                        })
-                    except Exception as e:
-                        logger.debug(f"Failed to emit load_model integration test event: {e}")
+                    self.model = load_model_func() #TODO- check why this fails on onnx model
+                    _validate_result(self.model)

                 # keras interface
                 def __call__(self, arg):
                     ret = self.model(arg)
-                    if isinstance(ret, list):
-                        return [r.numpy() for r in ret]
-
                     return ret.numpy()

                 def _convert_onnx_inputs_to_correct_type(
@@ -184,8 +337,10 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]

                 def get_inputs(self):
                     return self.model.get_inputs()
-
-
+            model_placeholder=ModelPlaceholder()
+            if not _call_from_tl_platform:
+                update_env_params_func("tensorleap_load_model", "v")
+            return model_placeholder

         def mapping_inner():
             class ModelOutputPlaceholder:
@@ -248,12 +403,11 @@ def tensorleap_load_model(prediction_types: Optional[List[PredictionTypeHandler]

             return ModelPlaceholder()

-        def final_inner():
+        def final_inner(*args, **kwargs):
             if os.environ.get(mapping_runtime_mode_env_var_mame):
                 return mapping_inner()
             else:
-                return inner()
-
+                return inner(*args, **kwargs)
         return final_inner

     return decorating_function
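Sketch (not part of the diff): the reworked _validate_result accepts only Keras models or onnxruntime.InferenceSession objects, so a loader that passes the check could look like this; the .h5 path is a placeholder:

    import tensorflow as tf
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_load_model

    @tensorleap_load_model(prediction_types=[])
    def load_model() -> tf.keras.Model:
        # Returning anything other than a Keras model or an onnxruntime.InferenceSession
        # now raises AssertionError from _validate_result.
        return tf.keras.models.load_model('model.h5')  # placeholder path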
@@ -264,81 +418,168 @@ def tensorleap_custom_metric(name: str,
                             compute_insights: Optional[Union[bool, Dict[str, bool]]] = None,
                             connects_to=None):
     name_to_unique_name = defaultdict(set)
-
     def decorating_function(
             user_function: Union[CustomCallableInterfaceMultiArgs, CustomMultipleReturnCallableInterfaceMultiArgs,
             ConfusionMatrixCallableInterfaceMultiArgs]):
+
+        def _validate_decorators_signature():
+            err_message = f"{user_function.__name__} validation failed.\n"
+            if not isinstance(name, str):
+                raise TypeError(err_message + f"`name` must be a string, got type {type(name).__name__}.")
+            valid_directions = {MetricDirection.Upward, MetricDirection.Downward}
+            if isinstance(direction, MetricDirection):
+                if direction not in valid_directions:
+                    raise ValueError(
+                        err_message +
+                        f"Invalid MetricDirection: {direction}. Must be one of {valid_directions}, "
+                        f"got type {type(direction).__name__}."
+                    )
+            elif isinstance(direction, dict):
+                if not all(isinstance(k, str) for k in direction.keys()):
+                    invalid_keys = {k: type(k).__name__ for k in direction.keys() if not isinstance(k, str)}
+                    raise TypeError(
+                        err_message +
+                        f"All keys in `direction` must be strings, got invalid key types: {invalid_keys}."
+                    )
+                for k, v in direction.items():
+                    if v not in valid_directions:
+                        raise ValueError(
+                            err_message +
+                            f"Invalid direction for key '{k}': {v}. Must be one of {valid_directions}, "
+                            f"got type {type(v).__name__}."
+                        )
+            else:
+                raise TypeError(
+                    err_message +
+                    f"`direction` must be a MetricDirection or a Dict[str, MetricDirection], "
+                    f"got type {type(direction).__name__}."
+                )
+            if compute_insights is not None:
+                if not isinstance(compute_insights, (bool, dict)):
+                    raise TypeError(
+                        err_message +
+                        f"`compute_insights` must be a bool or a Dict[str, bool], "
+                        f"got type {type(compute_insights).__name__}."
+                    )
+                if isinstance(compute_insights, dict):
+                    if not all(isinstance(k, str) for k in compute_insights.keys()):
+                        invalid_keys = {k: type(k).__name__ for k in compute_insights.keys() if not isinstance(k, str)}
+                        raise TypeError(
+                            err_message +
+                            f"All keys in `compute_insights` must be strings, got invalid key types: {invalid_keys}."
+                        )
+                    for k, v in compute_insights.items():
+                        if not isinstance(v, bool):
+                            raise TypeError(
+                                err_message +
+                                f"Invalid type for compute_insights['{k}']: expected bool, got type {type(v).__name__}."
+                            )
+            if connects_to is not None:
+                valid_types = (str, list, tuple, set)
+                if not isinstance(connects_to, valid_types):
+                    raise TypeError(
+                        err_message +
+                        f"`connects_to` must be one of {valid_types}, got type {type(connects_to).__name__}."
+                    )
+                if isinstance(connects_to, (list, tuple, set)):
+                    invalid_elems = [f"{type(e).__name__}" for e in connects_to if not isinstance(e, str)]
+                    if invalid_elems:
+                        raise TypeError(
+                            err_message +
+                            f"All elements in `connects_to` must be strings, "
+                            f"but found element types: {invalid_elems}."
+                        )
+
+
+        _validate_decorators_signature()
+
         for metric_handler in leap_binder.setup_container.metrics:
             if metric_handler.metric_handler_data.name == name:
                 raise Exception(f'Metric with name {name} already exists. '
                                 f'Please choose another')

         def _validate_input_args(*args, **kwargs) -> None:
+            assert len(args)+len(kwargs) > 0, (
+                f"{user_function.__name__}() validation failed: "
+                f"Expected at least one positional|key-word argument of type np.ndarray, "
+                f"but received none. "
+                f"Correct usage example: tensorleap_custom_metric(input_array: np.ndarray, ...)"
+            )
             for i, arg in enumerate(args):
                 assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
-                    f'
+                    f'{user_function.__name__}() validation failed: '
                     f'Argument #{i} should be a numpy array. Got {type(arg)}.')
                 if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
                     assert arg.shape[0] == leap_binder.batch_size_to_validate, \
-                        (f'
+                        (f'{user_function.__name__}() validation failed: Argument #{i} '
                          f'first dim should be as the batch size. Got {arg.shape[0]} '
                          f'instead of {leap_binder.batch_size_to_validate}')

             for _arg_name, arg in kwargs.items():
                 assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
-                    f'
+                    f'{user_function.__name__}() validation failed: '
                     f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
                 if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
                     assert arg.shape[0] == leap_binder.batch_size_to_validate, \
-                        (f'
+                        (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
                          f'first dim should be as the batch size. Got {arg.shape[0]} '
                          f'instead of {leap_binder.batch_size_to_validate}')

         def _validate_result(result) -> None:
-
-
-
+            validate_output_structure(result, func_name=user_function.__name__,
+                                      expected_type_name="List[float | int | None | List[ConfusionMatrixElement] ] | NDArray[np.float32] or dictonary with one of these types as its values types")
+            supported_types_message = (f'{user_function.__name__}() validation failed: '
+                                       f'{user_function.__name__}() has returned unsupported type.\nSupported types are List[float|int|None], '
+                                       f'List[List[ConfusionMatrixElement]], NDArray[np.float32] or dictonary with one of these types as its values types. ')

-            def _validate_single_metric(single_metric_result):
+            def _validate_single_metric(single_metric_result,key=None):
                 if isinstance(single_metric_result, list):
                     if isinstance(single_metric_result[0], list):
-                        assert isinstance(single_metric_result[0]
-                        f
+                        assert all(isinstance(cm, ConfusionMatrixElement) for cm in single_metric_result[0]), (
+                            f"{supported_types_message} "
+                            f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
+                            f"List[List[{', '.join(type(cm).__name__ for cm in single_metric_result[0])}]]."
+                        )
+
                     else:
-                        assert isinstance(single_metric_result
-
-
+                        assert all(isinstance(v, (float,int,type(None),np.float32)) for v in single_metric_result), (
+                            f"{supported_types_message}\n"
+                            f"Got {'a dict where the value of ' + str(key) + ' is of type ' if key is not None else ''}"
+                            f"List[{', '.join(type(v).__name__ for v in single_metric_result)}]."
+                        )
                 else:
                     assert isinstance(single_metric_result,
-                                      np.ndarray), f'{supported_types_message}
-                    assert len(single_metric_result.shape) == 1, (f'
+                                      np.ndarray), f'{supported_types_message}\nGot {type(single_metric_result)}.'
+                    assert len(single_metric_result.shape) == 1, (f'{user_function.__name__}() validation failed: '
                                                                   f'The return shape should be 1D. Got {len(single_metric_result.shape)}D.')

                 if leap_binder.batch_size_to_validate:
                     assert len(single_metric_result) == leap_binder.batch_size_to_validate, \
-                        f'
+                        f'{user_function.__name__}() validation failed: The return len {f"of srt{key} value" if key is not None else ""} should be as the batch size.'

             if isinstance(result, dict):
                 for key, value in result.items():
+                    _validate_single_metric(value,key)
+
                     assert isinstance(key, str), \
-                        (f'
+                        (f'{user_function.__name__}() validation failed: '
                          f'Keys in the return dict should be of type str. Got {type(key)}.')
                     _validate_single_metric(value)

                 if isinstance(direction, dict):
                     for direction_key in direction:
                         assert direction_key in result, \
-                            (f'
+                            (f'{user_function.__name__}() validation failed: '
                              f'Keys in the direction mapping should be part of result keys. Got key {direction_key}.')

                 if compute_insights is not None:
                     assert isinstance(compute_insights, dict), \
-                        (f'
+                        (f'{user_function.__name__}() validation failed: '
                          f'compute_insights should be dict if using the dict results. Got {type(compute_insights)}.')

                     for ci_key in compute_insights:
                         assert ci_key in result, \
-                            (f'
+                            (f'{user_function.__name__}() validation failed: '
                              f'Keys in the compute_insights mapping should be part of result keys. Got key {ci_key}.')

             else:
@@ -346,7 +587,7 @@ def tensorleap_custom_metric(name: str,

                 if compute_insights is not None:
                     assert isinstance(compute_insights, bool), \
-                        (f'
+                        (f'{user_function.__name__}() validation failed: '
                          f'compute_insights should be boolean. Got {type(compute_insights)}.')

         @functools.wraps(user_function)
@@ -378,6 +619,8 @@ def tensorleap_custom_metric(name: str,
             result = inner_without_validate(*args, **kwargs)

             _validate_result(result)
+            if not _call_from_tl_platform:
+                update_env_params_func("tensorleap_custom_metric","v")
             return result

         def mapping_inner(*args, **kwargs):
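Sketch (not part of the diff) of a metric that satisfies both the new decorator-signature validation and the dict-result validation; names, shapes, and the MetricDirection import path are illustrative assumptions:

    import numpy as np
    from code_loader.contract.enums import MetricDirection
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_metric

    @tensorleap_custom_metric(name='abs_error',
                              direction={'mean_abs': MetricDirection.Downward})
    def abs_error(prediction: np.ndarray, gt: np.ndarray):
        # Dict keys must be strings and must cover every key used in `direction`;
        # each value must be a 1D, batch-sized array (or a list of float/int/None).
        return {'mean_abs': np.abs(prediction - gt).mean(axis=-1).astype(np.float32)}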
@@ -417,28 +660,38 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
     name_to_unique_name = defaultdict(set)

     def decorating_function(user_function: VisualizerCallableInterface):
+        assert isinstance(visualizer_type,LeapDataType),(f"{user_function.__name__} validation failed: "
+                                                         f"visualizer_type should be of type {LeapDataType.__name__} but got {type(visualizer_type)}"
+                                                         )
+
         for viz_handler in leap_binder.setup_container.visualizers:
             if viz_handler.visualizer_handler_data.name == name:
                 raise Exception(f'Visualizer with name {name} already exists. '
                                 f'Please choose another')

         def _validate_input_args(*args, **kwargs):
+            assert len(args) + len(kwargs) > 0, (
+                f"{user_function.__name__}() validation failed: "
+                f"Expected at least one positional|key-word argument of type np.ndarray, "
+                f"but received none. "
+                f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
+            )
             for i, arg in enumerate(args):
                 assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
-                    f'
+                    f'{user_function.__name__}() validation failed: '
                     f'Argument #{i} should be a numpy array. Got {type(arg)}.')
                 if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
                     assert arg.shape[0] != leap_binder.batch_size_to_validate, \
-                        (f'
+                        (f'{user_function.__name__}() validation failed: '
                          f'Argument #{i} should be without batch dimension. ')

             for _arg_name, arg in kwargs.items():
                 assert isinstance(arg, (np.ndarray, SamplePreprocessResponse)), (
-                    f'
+                    f'{user_function.__name__}() validation failed: '
                     f'Argument {_arg_name} should be a numpy array. Got {type(arg)}.')
                 if leap_binder.batch_size_to_validate and isinstance(arg, np.ndarray):
                     assert arg.shape[0] != leap_binder.batch_size_to_validate, \
-                        (f'
+                        (f'{user_function.__name__}() validation failed: Argument {_arg_name} '
                          f'should be without batch dimension. ')

         def _validate_result(result):
@@ -452,8 +705,11 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
                 LeapDataType.ImageWithBBox: LeapImageWithBBox,
                 LeapDataType.ImageWithHeatmap: LeapImageWithHeatmap
             }
+            validate_output_structure(result, func_name=user_function.__name__,
+                                      expected_type_name=result_type_map[visualizer_type])
+
             assert isinstance(result, result_type_map[visualizer_type]), \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'The return type should be {result_type_map[visualizer_type]}. Got {type(result)}.')

         @functools.wraps(user_function)
@@ -485,6 +741,8 @@ def tensorleap_custom_visualizer(name: str, visualizer_type: LeapDataType,
             result = inner_without_validate(*args, **kwargs)

             _validate_result(result)
+            if not _call_from_tl_platform:
+                update_env_params_func("tensorleap_custom_visualizer","v")
             return result

         def mapping_inner(*args, **kwargs):
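Sketch (not part of the diff): visualizer inputs arrive without a batch dimension and the return value must match result_type_map[visualizer_type]. The import paths and the LeapImage constructor below are assumptions based on this package's contract modules:

    import numpy as np
    from code_loader.contract.enums import LeapDataType
    from code_loader.contract.visualizer_classes import LeapImage
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_visualizer

    @tensorleap_custom_visualizer(name='image_vis', visualizer_type=LeapDataType.Image)
    def image_visualizer(image: np.ndarray) -> LeapImage:
        # The wrapper asserts the result is a LeapImage when visualizer_type is LeapDataType.Image.
        return LeapImage(image.astype(np.float32))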
@@ -526,30 +784,26 @@ def tensorleap_metadata(
                                 f'Please choose another')

         def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
-            assert isinstance(sample_id, (int, str)), \
-                (f'tensorleap_metadata validation failed: '
-                 f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
-            assert isinstance(preprocess_response, PreprocessResponse), \
-                (f'tensorleap_metadata validation failed: '
-                 f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
             assert type(sample_id) == preprocess_response.sample_id_type, \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'Argument sample_id should be as the same type as defined in the preprocess response '
                  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')

         def _validate_result(result):
             supported_result_types = (type(None), int, str, bool, float, dict, np.floating,
                                       np.bool_, np.unsignedinteger, np.signedinteger, np.integer)
+            validate_output_structure(result, func_name=user_function.__name__,
+                                      expected_type_name=supported_result_types)
             assert isinstance(result, supported_result_types), \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'Unsupported return type. Got {type(result)}. should be any of {str(supported_result_types)}')
             if isinstance(result, dict):
                 for key, value in result.items():
                     assert isinstance(key, str), \
-                        (f'
+                        (f'{user_function.__name__}() validation failed: '
                          f'Keys in the return dict should be of type str. Got {type(key)}.')
                     assert isinstance(value, supported_result_types), \
-                        (f'
+                        (f'{user_function.__name__}() validation failed: '
                          f'Values in the return dict should be of type {str(supported_result_types)}. Got {type(value)}.')

         def inner_without_validate(sample_id, preprocess_response):
@@ -566,15 +820,19 @@ def tensorleap_metadata(

         leap_binder.set_metadata(inner_without_validate, name, metadata_type)

-        def inner(
+        def inner(*args,**kwargs):
             if os.environ.get(mapping_runtime_mode_env_var_mame):
                 return None
-
+            validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
+                                    func_name=user_function.__name__, expected_names=["idx", "preprocess"],**kwargs)
+            sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
             _validate_input_args(sample_id, preprocess_response)

             result = inner_without_validate(sample_id, preprocess_response)

             _validate_result(result)
+            if not _call_from_tl_platform:
+                update_env_params_func("tensorleap_metadata","v")
             return result

         return inner
@@ -636,20 +894,24 @@ def tensorleap_preprocess():
         leap_binder.set_preprocess(user_function)

         def _validate_input_args(*args, **kwargs):
-            assert len(args)
-                (f'
+            assert len(args) + len(kwargs) == 0, \
+                (f'{user_function.__name__}() validation failed: '
                  f'The function should not take any arguments. Got {args} and {kwargs}.')

         def _validate_result(result):
-            assert isinstance(result, list),
-                (
-
+            assert isinstance(result, list), (
+                f"{user_function.__name__}() validation failed: expected return type list[{PreprocessResponse.__name__}]"
+                f"(e.g., [PreprocessResponse1, PreprocessResponse2, ...]), but returned type is {type(result).__name__}."
+                if not isinstance(result, tuple)
+                else f"{user_function.__name__}() validation failed: expected to return a single list[{PreprocessResponse.__name__}] object, "
+                     f"but returned {len(result)} objects instead."
+            )
             for i, response in enumerate(result):
                 assert isinstance(response, PreprocessResponse), \
-                    (f'
+                    (f'{user_function.__name__}() validation failed: '
                      f'Element #{i} in the return list should be a PreprocessResponse. Got {type(response)}.')
             assert len(set(result)) == len(result), \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'The return list should not contain duplicate PreprocessResponse objects.')

         def inner(*args, **kwargs):
@@ -657,7 +919,6 @@ def tensorleap_preprocess():
                 return [None, None, None, None]

             _validate_input_args(*args, **kwargs)
-
             result = user_function()
             _validate_result(result)

@@ -668,7 +929,8 @@ def tensorleap_preprocess():
                 })
             except Exception as e:
                 logger.debug(f"Failed to emit preprocess integration test event: {e}")
-
+            if not _call_from_tl_platform:
+                update_env_params_func("tensorleap_preprocess", "v")
             return result

         return inner
@@ -867,29 +1129,23 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
             raise Exception(f"Channel dim for input {name} is expected to be either -1 or positive")

         def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
-            assert isinstance(sample_id, (int, str)), \
-                (f'tensorleap_input_encoder validation failed: '
-                 f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
-            assert isinstance(preprocess_response, PreprocessResponse), \
-                (f'tensorleap_input_encoder validation failed: '
-                 f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
             assert type(sample_id) == preprocess_response.sample_id_type, \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'Argument sample_id should be as the same type as defined in the preprocess response '
                  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')

         def _validate_result(result):
+            validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray")
             assert isinstance(result, np.ndarray), \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
             assert result.dtype == np.float32, \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'The return type should be a numpy array of type float32. Got {result.dtype}.')
-            assert channel_dim - 1 <= len(result.shape), (f'
+            assert channel_dim - 1 <= len(result.shape), (f'{user_function.__name__}() validation failed: '
                 f'The channel_dim ({channel_dim}) should be <= to the rank of the resulting input rank ({len(result.shape)}).')

         def inner_without_validate(sample_id, preprocess_response):
-
             global _called_from_inside_tl_decorator
             _called_from_inside_tl_decorator += 1

@@ -903,7 +1159,10 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
         leap_binder.set_input(inner_without_validate, name, channel_dim=channel_dim)


-        def inner(
+        def inner(*args, **kwargs):
+            validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
+                                    func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
+            sample_id, preprocess_response = args if len(args)!=0 else kwargs.values()
             _validate_input_args(sample_id, preprocess_response)

             result = inner_without_validate(sample_id, preprocess_response)
@@ -911,6 +1170,7 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
             _validate_result(result)

             if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
+                batch_warning(result,user_function.__name__)
                 result = np.expand_dims(result, axis=0)
             # Emit integration test event once per test
             try:
@@ -921,17 +1181,18 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):
                 })
             except Exception as e:
                 logger.debug(f"Failed to emit input_encoder integration test event: {e}")
+            if not _call_from_tl_platform:
+                update_env_params_func("tensorleap_input_encoder", "v")

             return result


-
         node_mapping_type = NodeMappingType.Input
         if model_input_index is not None:
             node_mapping_type = NodeMappingType(f'Input{str(model_input_index)}')
         inner.node_mapping = NodeMapping(name, node_mapping_type)

-        def mapping_inner(
+        def mapping_inner(*args, **kwargs):
             class TempMapping:
                 pass

@@ -943,11 +1204,11 @@ def tensorleap_input_encoder(name: str, channel_dim=-1, model_input_index=None):

         mapping_inner.node_mapping = NodeMapping(name, node_mapping_type)

-        def final_inner(
+        def final_inner(*args, **kwargs):
             if os.environ.get(mapping_runtime_mode_env_var_mame):
-                return mapping_inner(
+                return mapping_inner(*args, **kwargs)
             else:
-                return inner(
+                return inner(*args, **kwargs)

         final_inner.node_mapping = NodeMapping(name, node_mapping_type)

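Sketch (not part of the diff): the new batch_warning fires when an encoder output already has size 1 on axis 0, because the integration-test wrapper adds its own batch dimension. An encoder should therefore return a single unbatched float32 sample; the shape below is illustrative:

    import numpy as np
    from code_loader.contract.datasetclasses import PreprocessResponse
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_input_encoder

    @tensorleap_input_encoder(name='image')
    def image_encoder(idx: str, preprocess: PreprocessResponse) -> np.ndarray:
        # Return one unbatched sample, e.g. (H, W, C) float32. A leading axis of size 1
        # would trigger batch_warning and end up double-batched as (1, 1, H, W, C).
        return np.zeros((224, 224, 3), dtype=np.float32)  # placeholder for real loading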
@@ -964,23 +1225,18 @@ def tensorleap_gt_encoder(name: str):
                                 f'Please choose another')

         def _validate_input_args(sample_id: Union[int, str], preprocess_response: PreprocessResponse):
-            assert isinstance(sample_id, (int, str)), \
-                (f'tensorleap_gt_encoder validation failed: '
-                 f'Argument sample_id should be either int or str. Got {type(sample_id)}.')
-            assert isinstance(preprocess_response, PreprocessResponse), \
-                (f'tensorleap_gt_encoder validation failed: '
-                 f'Argument preprocess_response should be a PreprocessResponse. Got {type(preprocess_response)}.')
             assert type(sample_id) == preprocess_response.sample_id_type, \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'Argument sample_id should be as the same type as defined in the preprocess response '
                  f'{preprocess_response.sample_id_type}. Got {type(sample_id)}.')

         def _validate_result(result):
+            validate_output_structure(result, func_name=user_function.__name__, expected_type_name = "np.ndarray",gt_flag=True)
             assert isinstance(result, np.ndarray), \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'Unsupported return type. Should be a numpy array. Got {type(result)}.')
             assert result.dtype == np.float32, \
-                (f'
+                (f'{user_function.__name__}() validation failed: '
                  f'The return type should be a numpy array of type float32. Got {result.dtype}.')

         def inner_without_validate(sample_id, preprocess_response):
@@ -997,7 +1253,10 @@ def tensorleap_gt_encoder(name: str):
         leap_binder.set_ground_truth(inner_without_validate, name)


-        def inner(
+        def inner(*args, **kwargs):
+            validate_args_structure(*args, types_order=[Union[int, str], PreprocessResponse],
+                                    func_name=user_function.__name__, expected_names=["idx", "preprocess"], **kwargs)
+            sample_id, preprocess_response = args
             _validate_input_args(sample_id, preprocess_response)

             result = inner_without_validate(sample_id, preprocess_response)
@@ -1005,6 +1264,7 @@ def tensorleap_gt_encoder(name: str):
             _validate_result(result)

             if _called_from_inside_tl_decorator == 0 and _called_from_inside_tl_integration_test_decorator:
+                batch_warning(result, user_function.__name__)
                 result = np.expand_dims(result, axis=0)
             # Emit integration test event once per test
             try:
@@ -1013,12 +1273,13 @@ def tensorleap_gt_encoder(name: str):
                 })
             except Exception as e:
                 logger.debug(f"Failed to emit gt_encoder integration test event: {e}")
-
+            if not _call_from_tl_platform:
+                update_env_params_func("tensorleap_gt_encoder", "v")
             return result

         inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)

-        def mapping_inner(
+        def mapping_inner(*args, **kwargs):
             class TempMapping:
                 pass

@@ -1029,11 +1290,11 @@ def tensorleap_gt_encoder(name: str):

         mapping_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)

-        def final_inner(
+        def final_inner(*args, **kwargs):
             if os.environ.get(mapping_runtime_mode_env_var_mame):
-                return mapping_inner(
+                return mapping_inner(*args, **kwargs)
             else:
-                return inner(
+                return inner(*args, **kwargs)

         final_inner.node_mapping = NodeMapping(name, NodeMappingType.GroundTruth)

@@ -1054,28 +1315,37 @@ def tensorleap_custom_loss(name: str, connects_to=None):
         valid_types = (np.ndarray, SamplePreprocessResponse)

         def _validate_input_args(*args, **kwargs):
+            assert len(args) + len(kwargs) > 0, (
+                f"{user_function.__name__}() validation failed: "
+                f"Expected at least one positional|key-word argument of the allowed types (np.ndarray|SamplePreprocessResponse|list(np.ndarray|SamplePreprocessResponse)). "
+                f"but received none. "
+                f"Correct usage example: {user_function.__name__}(input_array: np.ndarray, ...)"
+            )
             for i, arg in enumerate(args):
                 if isinstance(arg, list):
                     for y, elem in enumerate(arg):
-                        assert isinstance(elem, valid_types), (f'
+                        assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
                             f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
                 else:
-                    assert isinstance(arg, valid_types), (f'
+                    assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
                         f'Argument #{i} should be a numpy array. Got {type(arg)}.')
             for _arg_name, arg in kwargs.items():
                 if isinstance(arg, list):
                     for y, elem in enumerate(arg):
-                        assert isinstance(elem, valid_types), (f'
+                        assert isinstance(elem, valid_types), (f'{user_function.__name__}() validation failed: '
                             f'Element #{y} of list should be a numpy array. Got {type(elem)}.')
                 else:
-                    assert isinstance(arg, valid_types), (f'
+                    assert isinstance(arg, valid_types), (f'{user_function.__name__}() validation failed: '
                         f'Argument #{_arg_name} should be a numpy array. Got {type(arg)}.')

         def _validate_result(result):
+            validate_output_structure(result, func_name=user_function.__name__,
+                                      expected_type_name="np.ndarray")
             assert isinstance(result, np.ndarray), \
-                (f'
+                (f'{user_function.__name__} validation failed: '
                  f'The return type should be a numpy array. Got {type(result)}.')
-
+            assert result.ndim<2 ,(f'{user_function.__name__} validation failed: '
+                                   f'The return type should be a 1Dim numpy array but got {result.ndim}Dim.')

         @functools.wraps(user_function)
         def inner_without_validate(*args, **kwargs):
@@ -1106,6 +1376,9 @@ def tensorleap_custom_loss(name: str, connects_to=None):
             result = inner_without_validate(*args, **kwargs)

             _validate_result(result)
+            if not _call_from_tl_platform:
+                update_env_params_func("tensorleap_custom_loss", "v")
+
             return result

         def mapping_inner(*args, **kwargs):
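Sketch (not part of the diff): the added result.ndim < 2 assertion means a custom loss must now return at most a 1D array (one value per sample), not a full per-element loss tensor:

    import numpy as np
    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_custom_loss

    @tensorleap_custom_loss(name='mse')
    def mse_loss(prediction: np.ndarray, gt: np.ndarray) -> np.ndarray:
        # Reduce every axis except the batch axis so the result is 1D (one value per sample).
        reduce_axes = tuple(range(1, prediction.ndim))
        return np.mean((prediction - gt) ** 2, axis=reduce_axes).astype(np.float32)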
@@ -1162,3 +1435,93 @@ def tensorleap_custom_layer(name: str):
         return custom_layer

     return decorating_function
+
+
+def tensorleap_status_table():
+    '''
+    Usage example:
+    ###################
+    leap_integration.py
+    ###################
+    from code_loader.inner_leap_binder.leapbinder_decorators import tensorleap_status_table
+    ...
+    ...
+    ...
+    if __name__ == '__main__':
+        tensorleap_status_table()
+        ...
+    '''
+    import atexit
+    import sys
+    import traceback
+    CHECK = "✅"
+    CROSS = "❌"
+
+    table = [
+        {"name": "tensorleap_preprocess", "Added to integration": CROSS},
+        {"name": "tensorleap_integration_test", "Added to integration": CROSS},
+        {"name": "tensorleap_input_encoder", "Added to integration": CROSS},
+        {"name": "tensorleap_gt_encoder", "Added to integration": CROSS},
+        {"name": "tensorleap_load_model", "Added to integration": CROSS},
+        {"name": "tensorleap_custom_loss", "Added to integration": CROSS},
+        {"name": "tensorleap_custom_metric (optional)", "Added to integration": CROSS},
+        {"name": "tensorleap_metadata (optional)", "Added to integration": CROSS},
+        {"name": "tensorleap_custom_visualizer (optional)", "Added to integration": CROSS},
+
+    ]
+
+    _finalizer_called = {"done": False}
+
+    def _remove_suffix(s: str, suffix: str) -> str:
+        #This is needed because str.remove_suffix was presented in python3.9+
+        if suffix and s.endswith(suffix):
+            return s[:-len(suffix)]
+        return s
+
+    def _print_table():
+        ready_mess = "\nAll parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system."
+        not_ready_mess = "\nSome mandatory components have not yet been added to the Integration test. Recommended next interface to add is: "
+        mandatory_ready_mess = "\nAll mandatory parts have been successfully set. If no errors accured, you can now push the project to the Tensorleap system or continue to the next optional reccomeded interface,adding: "
+
+        name_width = max(len(row["name"]) for row in table)
+        status_width = max(len(row["Added to integration"]) for row in table)
+        header = f"{'Decorator Name'.ljust(name_width)} | {'Added to integration'.ljust(status_width)}"
+        sep = "-" * len(header)
+        print("\n" + header)
+        print(sep)
+        ready=True
+        for row in table:
+            print(f"{row['name'].ljust(name_width)} | {row['Added to integration'].ljust(status_width)}")
+            if row['Added to integration']==CROSS and ready:
+                ready=False
+                next_step=row['name']
+
+
+        print(ready_mess) if ready else print(mandatory_ready_mess+next_step) if "optional" in next_step else print(not_ready_mess+next_step)
+    def update_env_params(name: str, status: str = "✓"):
+        for row in table:
+            if _remove_suffix(row["name"]," (optional)") == name:
+                row["Added to integration"] = CHECK if status=="v" else CROSS
+                break
+    def run_on_exit():
+        if _finalizer_called["done"]:
+            return
+        _finalizer_called["done"] = True
+        _print_table()
+    def handle_exception(exc_type, exc_value, exc_traceback):
+        traceback.print_exception(exc_type, exc_value, exc_traceback)
+        run_on_exit()
+    atexit.register(run_on_exit)
+    sys.excepthook = handle_exception
+    return update_env_params
+
+
+if not _call_from_tl_platform:
+    update_env_params_func = tensorleap_status_table()
+
+
+
+
+
+
All remaining files are unchanged between code_loader-1.0.144 and code_loader-1.0.145.