code-loader 1.0.41__tar.gz → 1.0.42__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: code-loader
- Version: 1.0.41
+ Version: 1.0.42
  Summary:
  Home-page: https://github.com/tensorleap/code-loader
  License: MIT
@@ -13,6 +13,25 @@ custom_latent_space_attribute = "custom_latent_space"

  @dataclass
  class PreprocessResponse:
+ """
+ An object that holds the preprocessed data for use within the Tensorleap platform.
+
+ This class is used to encapsulate the results of data preprocessing, including inputs, metadata, labels, and other relevant information.
+ It facilitates handling and integration of the processed data within Tensorleap.
+
+ Attributes:
+     length (int): The length of the preprocessed data.
+     data (Any): The preprocessed data itself. This can be any data type, depending on the preprocessing logic.
+
+ Example:
+     # Example usage of PreprocessResponse
+     preprocessed_data = {
+         'images': ['path/to/image1.jpg', 'path/to/image2.jpg'],
+         'labels': ['SUV', 'truck'],
+         'metadata': [{'id': 1, 'source': 'camera1'}, {'id': 2, 'source': 'camera2'}]
+     }
+     response = PreprocessResponse(length=len(preprocessed_data['images']), data=preprocessed_data)
+ """
  length: int
  data: Any

@@ -95,7 +95,19 @@ class DatasetTestResultPayload:

  @dataclass
  class BoundingBox:
- # (x, y) is the center of the bounding box
+ """
+ Represents a bounding box for an object in an image.
+
+ Attributes:
+     x (float): The x-coordinate of the center of the bounding box, a value between [0, 1] representing the percentage according to the image width.
+     y (float): The y-coordinate of the center of the bounding box, a value between [0, 1] representing the percentage according to the image height.
+     width (float): The width of the bounding box, a value between [0, 1] representing the percentage according to the image width.
+     height (float): The height of the bounding box, a value between [0, 1] representing the percentage according to the image height.
+     confidence (float): The confidence score of the bounding box. For predictions, this is a score typically between [0, 1]. For ground truth data, this can be 1.
+     label (str): The label or class name associated with the bounding box.
+     rotation (float): The rotation of the bounding box, a value between [0, 360] representing the degree of rotation. Default is 0.0.
+     metadata (Optional[Dict[str, Union[str, int, float]]]): Optional metadata associated with the bounding box.
+ """
  x: float # value between [0, 1], represent the percentage according to the image size.
  y: float # value between [0, 1], represent the percentage according to the image size.
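
For reference, a minimal sketch of constructing a BoundingBox with the attributes documented above. The import path and values are illustrative assumptions, not taken from this diff:

    from code_loader.contract.datasetclasses import BoundingBox  # assumed module path

    # A ground-truth box centered at (0.5, 0.4), covering 20% x 30% of the image.
    gt_box = BoundingBox(x=0.5, y=0.4, width=0.2, height=0.3,
                         confidence=1.0, label='SUV')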

@@ -28,6 +28,13 @@ def validate_type(actual: Any, expected: Any, prefix_message: str = '') -> None:

  @dataclass
  class LeapImage:
+ """
+ Visualizer representing an image for Tensorleap.
+
+ Attributes:
+     data (npt.NDArray[np.float32] | npt.NDArray[np.uint8]): The image data.
+     type (LeapDataType): The data type, default is LeapDataType.Image.
+ """
  data: Union[npt.NDArray[np.float32], npt.NDArray[np.uint8]]
  type: LeapDataType = LeapDataType.Image
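
A minimal sketch of building a LeapImage from a uint8 array; the import path and array contents are illustrative assumptions:

    import numpy as np
    from code_loader.contract.visualizer_classes import LeapImage  # assumed module path

    image = np.zeros((256, 256, 3), dtype=np.uint8)  # placeholder H x W x 3 image
    image_vis = LeapImage(data=image)                # type defaults to LeapDataType.Image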

@@ -41,6 +48,14 @@ class LeapImage:

  @dataclass
  class LeapImageWithBBox:
+ """
+ Visualizer representing an image with bounding boxes for Tensorleap, used for object detection tasks.
+
+ Attributes:
+     data (npt.NDArray[np.float32] | npt.NDArray[np.uint8]): The image data, shaped [H, W, 3] or [H, W, 1].
+     bounding_boxes (List[BoundingBox]): List of Tensorleap BoundingBox objects, sized relative to the image size.
+     type (LeapDataType): The data type, default is LeapDataType.ImageWithBBox.
+ """
  data: Union[npt.NDArray[np.float32], npt.NDArray[np.uint8]]
  bounding_boxes: List[BoundingBox]
  type: LeapDataType = LeapDataType.ImageWithBBox
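
A small sketch combining the image and bounding-box dataclasses above; import paths and values are illustrative assumptions:

    import numpy as np
    from code_loader.contract.datasetclasses import BoundingBox            # assumed module path
    from code_loader.contract.visualizer_classes import LeapImageWithBBox  # assumed module path

    image = np.zeros((416, 416, 3), dtype=np.float32)
    boxes = [BoundingBox(x=0.5, y=0.5, width=0.25, height=0.4, confidence=0.9, label='truck')]
    bbox_vis = LeapImageWithBBox(data=image, bounding_boxes=boxes)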
@@ -55,6 +70,13 @@ class LeapImageWithBBox:

  @dataclass
  class LeapGraph:
+ """
+ Visualizer representing line chart data for Tensorleap.
+
+ Attributes:
+     data (npt.NDArray[np.float32]): The array data, shaped [M, N], where M is the number of data points and N is the number of variables.
+     type (LeapDataType): The data type, default is LeapDataType.Graph.
+ """
  data: npt.NDArray[np.float32]
  type: LeapDataType = LeapDataType.Graph
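
A minimal sketch of a LeapGraph holding 100 data points of 2 variables; the import path and data are illustrative assumptions:

    import numpy as np
    from code_loader.contract.visualizer_classes import LeapGraph  # assumed module path

    series = np.random.rand(100, 2).astype(np.float32)  # [M, N]: 100 points, 2 variables
    graph_vis = LeapGraph(data=series)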

@@ -67,6 +89,14 @@ class LeapGraph:

  @dataclass
  class LeapText:
+ """
+ Visualizer representing text data for Tensorleap.
+
+ Attributes:
+     data (List[str]): The text data, consisting of a list of text tokens. If the model requires fixed-length inputs,
+         it is recommended to maintain the fixed length, using empty strings ('') instead of padding tokens ('PAD'), e.g., ['I', 'ate', 'a', 'banana', '', '', '', ...]
+     type (LeapDataType): The data type, default is LeapDataType.Text.
+ """
  data: List[str]
  type: LeapDataType = LeapDataType.Text
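
A minimal sketch of a LeapText padded to a fixed length with empty strings, as the docstring above recommends; the import path is an assumed detail:

    from code_loader.contract.visualizer_classes import LeapText  # assumed module path

    tokens = ['I', 'ate', 'a', 'banana'] + [''] * 4  # pad to a fixed length of 8 tokens
    text_vis = LeapText(data=tokens)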

@@ -79,6 +109,16 @@ class LeapText:

  @dataclass
  class LeapHorizontalBar:
+ """
+ Visualizer representing horizontal bar data for Tensorleap.
+ For example, this can be used to visualize the model's prediction scores in a classification problem.
+
+ Attributes:
+     body (npt.NDArray[np.float32]): The data for the bar, shaped [C], where C is the number of data points.
+     labels (List[str]): Labels for the horizontal bar; e.g., when visualizing the model's classification output, labels are the class names.
+         Length of `body` should match the length of `labels`, C.
+     type (LeapDataType): The data type, default is LeapDataType.HorizontalBar.
+ """
  body: npt.NDArray[np.float32]
  labels: List[str]
  type: LeapDataType = LeapDataType.HorizontalBar
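
A minimal sketch of a LeapHorizontalBar for a three-class prediction, keeping len(body) equal to len(labels); the import path and scores are illustrative assumptions:

    import numpy as np
    from code_loader.contract.visualizer_classes import LeapHorizontalBar  # assumed module path

    scores = np.array([0.7, 0.2, 0.1], dtype=np.float32)  # shaped [C], C = 3
    bar_vis = LeapHorizontalBar(body=scores, labels=['SUV', 'truck', 'sedan'])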
@@ -96,6 +136,16 @@ class LeapHorizontalBar:

  @dataclass
  class LeapImageMask:
+ """
+ Visualizer representing an image with a mask for Tensorleap.
+ This can be used for tasks such as segmentation and other applications where it is important to highlight specific regions within an image.
+
+ Attributes:
+     mask (npt.NDArray[np.uint8]): The mask data, shaped [H, W].
+     image (npt.NDArray[np.float32] | npt.NDArray[np.uint8]): The image data, shaped [H, W, 3] or [H, W, 1].
+     labels (List[str]): Labels associated with the mask regions; e.g., class names for segmented objects. The length of `labels` should match the number of unique values in `mask`.
+     type (LeapDataType): The data type, default is LeapDataType.ImageMask.
+ """
  mask: npt.NDArray[np.uint8]
  image: Union[npt.NDArray[np.float32], npt.NDArray[np.uint8]]
  labels: List[str]
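
A minimal sketch of a LeapImageMask where each mask value indexes into `labels`; the import path and values are illustrative assumptions:

    import numpy as np
    from code_loader.contract.visualizer_classes import LeapImageMask  # assumed module path

    image = np.zeros((128, 128, 3), dtype=np.uint8)
    mask = np.zeros((128, 128), dtype=np.uint8)   # [H, W], values index into labels
    mask[32:96, 32:96] = 1                        # mark a square region as class 1
    seg_vis = LeapImageMask(mask=mask, image=image, labels=['background', 'vehicle'])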
@@ -117,6 +167,16 @@ class LeapImageMask:

  @dataclass
  class LeapTextMask:
+ """
+ Visualizer representing text data with a mask for Tensorleap.
+ This can be used for tasks such as named entity recognition (NER), sentiment analysis, and other applications where it is important to highlight specific tokens or parts of the text.
+
+ Attributes:
+     mask (npt.NDArray[np.uint8]): The mask data, shaped [L].
+     text (List[str]): The text data, consisting of a list of text tokens of length L.
+     labels (List[str]): Labels associated with the masked tokens; e.g., named entities or sentiment categories. The length of `labels` should match the number of unique values in `mask`.
+     type (LeapDataType): The data type, default is LeapDataType.TextMask.
+ """
  mask: npt.NDArray[np.uint8]
  text: List[str]
  labels: List[str]
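
A minimal sketch of a LeapTextMask with one mask value per token; the import path, tokens, and labels are illustrative assumptions:

    import numpy as np
    from code_loader.contract.visualizer_classes import LeapTextMask  # assumed module path

    tokens = ['Tensorleap', 'is', 'based', 'in', 'Tel', 'Aviv']
    mask = np.array([1, 0, 0, 0, 2, 2], dtype=np.uint8)  # shaped [L], one value per token
    ner_vis = LeapTextMask(mask=mask, text=tokens, labels=['O', 'ORG', 'LOC'])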
@@ -137,6 +197,16 @@ class LeapTextMask:

  @dataclass
  class LeapImageWithHeatmap:
+ """
+ Visualizer representing an image with heatmaps for Tensorleap.
+ This can be used for tasks such as highlighting important regions in an image, visualizing attention maps, and other applications where it is important to overlay heatmaps on images.
+
+ Attributes:
+     image (npt.NDArray[np.float32]): The image data, shaped [H, W, C], where C is the number of channels.
+     heatmaps (npt.NDArray[np.float32]): The heatmap data, shaped [N, H, W], where N is the number of heatmaps.
+     labels (List[str]): Labels associated with the heatmaps; e.g., feature names or attention regions. The length of `labels` should match the number of heatmaps, N.
+     type (LeapDataType): The data type, default is LeapDataType.ImageWithHeatmap.
+ """
  image: npt.NDArray[np.float32]
  heatmaps: npt.NDArray[np.float32]
  labels: List[str]
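
A minimal sketch of a LeapImageWithHeatmap with two heatmaps over one image; the import path and data are illustrative assumptions:

    import numpy as np
    from code_loader.contract.visualizer_classes import LeapImageWithHeatmap  # assumed module path

    image = np.zeros((64, 64, 3), dtype=np.float32)
    heatmaps = np.random.rand(2, 64, 64).astype(np.float32)  # [N, H, W] with N = 2
    heat_vis = LeapImageWithHeatmap(image=image, heatmaps=heatmaps,
                                    labels=['attention_head_0', 'attention_head_1'])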
@@ -21,6 +21,15 @@ from code_loader.visualizers.default_visualizers import DefaultVisualizer, \


  class LeapBinder:
+ """
+ Interface to the Tensorleap platform. Provides methods to set up preprocessing,
+ visualization, custom loss functions, metrics, and other essential components for integrating the dataset and model
+ with Tensorleap.
+
+ Attributes:
+     setup_container (DatasetIntegrationSetup): Container to hold setup configurations.
+     cache_container (Dict[str, Any]): Cache container to store intermediate data.
+ """
  def __init__(self) -> None:
  self.setup_container = DatasetIntegrationSetup()
  self.cache_container: Dict[str, Any] = {"word_to_index": {}}
@@ -48,6 +57,36 @@ class LeapBinder:
  name: str,
  visualizer_type: LeapDataType,
  heatmap_visualizer: Optional[Callable[..., npt.NDArray[np.float32]]] = None) -> None:
+ """
+ Set a visualizer for a specific data type.
+
+ Args:
+     function (VisualizerCallableInterface): The visualizer function to be used for visualizing the data.
+     name (str): The name of the visualizer.
+     visualizer_type (LeapDataType): The type of data the visualizer handles (e.g., LeapDataType.Image, LeapDataType.Graph, LeapDataType.Text).
+     heatmap_visualizer (Optional[Callable[..., npt.NDArray[np.float32]]]): An optional heatmap visualizer function.
+         This is used when a heatmap must be reshaped to overlay correctly on the transformed data within the visualizer
+         function, i.e., if the visualizer changes the shape or scale of the input data, the heatmap visualizer
+         adjusts the heatmap accordingly to ensure it aligns properly with the visualized data.
+
+ Example:
+     def image_resize_visualizer(data: np.ndarray) -> LeapImage:
+         # Resize the image to a fixed size
+         resized_image = resize_image(data, (224, 224))
+         return LeapImage(data=resized_image)
+
+     def image_resize_heatmap_visualizer(heatmap: RawInputsForHeatmap) -> np.ndarray:
+         # Resize the heatmap to match the resized image
+         resized_heatmap = resize_heatmap(heatmap, (224, 224))
+         return resized_heatmap
+
+     leap_binder.set_visualizer(
+         function=image_resize_visualizer,
+         name='image_resize_visualizer',
+         visualizer_type=LeapDataType.Image,
+         heatmap_visualizer=image_resize_heatmap_visualizer
+     )
+ """
  arg_names = inspect.getfullargspec(function)[0]
  if heatmap_visualizer:
  visualizer_arg_names_set = set(arg_names)
@@ -90,18 +129,99 @@ class LeapBinder:
  self._visualizer_names.append(name)

  def set_preprocess(self, function: Callable[[], List[PreprocessResponse]]) -> None:
+ """
+ Set the preprocessing function for the dataset, i.e., the function that returns a list of PreprocessResponse objects for use within the Tensorleap platform.
+
+ Args:
+     function (Callable[[], List[PreprocessResponse]]): The preprocessing function.
+
+ Example:
+     def preprocess_func() -> List[PreprocessResponse]:
+         # Preprocess the dataset
+         train_data = {
+             'subset': 'train',
+             'images': ['path/to/train/image1.jpg', 'path/to/train/image2.jpg'],
+             'labels': ['SUV', 'truck'],
+             'metadata': [{'id': 1, 'source': 'camera1'}, {'id': 2, 'source': 'camera2'}]}
+
+         val_data = {
+             'subset': 'val',
+             'images': ['path/to/val/image1.jpg', 'path/to/val/image2.jpg'],
+             'labels': ['truck', 'truck'],
+             'metadata': [{'id': 1, 'source': 'camera1'}, {'id': 2, 'source': 'camera2'}]}
+
+         return [PreprocessResponse(length=len(train_data['images']), data=train_data),
+                 PreprocessResponse(length=len(val_data['images']), data=val_data)]
+
+     leap_binder.set_preprocess(preprocess_func)
+ """
  self.setup_container.preprocess = PreprocessHandler(function)

  def set_unlabeled_data_preprocess(self, function: Callable[[], PreprocessResponse]) -> None:
+ """
+ Set the preprocessing function for an unlabeled dataset. This function returns a PreprocessResponse object for use within the Tensorleap platform for sample data that does not contain labels.
+
+ Args:
+     function (Callable[[], PreprocessResponse]): The preprocessing function for unlabeled data.
+
+ Example:
+     def unlabeled_preprocess_func() -> PreprocessResponse:
+         # Preprocess the dataset
+         ul_data = {
+             'subset': 'unlabeled',
+             'images': ['path/to/train/image1.jpg', 'path/to/train/image2.jpg'],
+             'metadata': [{'id': 1, 'source': 'camera1'}, {'id': 2, 'source': 'camera2'}]}
+
+         return PreprocessResponse(length=len(ul_data['images']), data=ul_data)
+
+     leap_binder.set_unlabeled_data_preprocess(unlabeled_preprocess_func)
+ """
  self.setup_container.unlabeled_data_preprocess = UnlabeledDataPreprocessHandler(function)

  def set_input(self, function: SectionCallableInterface, name: str) -> None:
+ """
+ Set the input handler function.
+
+ Args:
+     function (SectionCallableInterface): The input handler function.
+     name (str): The name of the input section.
+
+ Example:
+     def input_encoder(subset: PreprocessResponse, index: int) -> np.ndarray:
+         # Return the processed input data for the given index and given subset response
+         img_path = subset.data["images"][index]
+         img = read_img(img_path)
+         img = normalize(img)
+         return img
+
+     leap_binder.set_input(input_encoder, name='input_encoder')
+ """
  function = to_numpy_return_wrapper(function)
  self.setup_container.inputs.append(InputHandler(name, function))

  self._encoder_names.append(name)

  def add_custom_loss(self, function: CustomCallableInterface, name: str) -> None:
+ """
+ Add a custom loss function to the setup.
+
+ Args:
+     function (CustomCallableInterface): The custom loss function.
+         This function receives:
+             - y_true: The true labels or values.
+             - y_pred: The predicted labels or values.
+         This function should return:
+             - A numeric value representing the loss.
+     name (str): The name of the custom loss function.
+
+ Example:
+     def custom_loss_function(y_true, y_pred):
+         # Calculate mean squared error as custom loss
+         return np.mean(np.square(y_true - y_pred))
+
+     leap_binder.add_custom_loss(custom_loss_function, name='custom_loss')
+ """
  arg_names = inspect.getfullargspec(function)[0]
  self.setup_container.custom_loss_handlers.append(CustomLossHandler(name, function, arg_names))

@@ -111,23 +231,132 @@ class LeapBinder:
  ConfusionMatrixCallableInterfaceMultiArgs],
  name: str,
  direction: Optional[MetricDirection] = MetricDirection.Downward) -> None:
+ """
+ Add a custom metric to the setup.
+
+ Args:
+     function (Union[CustomCallableInterfaceMultiArgs, CustomMultipleReturnCallableInterfaceMultiArgs, ConfusionMatrixCallableInterfaceMultiArgs]): The custom metric function.
+     name (str): The name of the custom metric.
+     direction (Optional[MetricDirection]): The direction of the metric, either MetricDirection.Upward or MetricDirection.Downward.
+         - MetricDirection.Upward: Indicates that higher values of the metric are better and should be maximized.
+         - MetricDirection.Downward: Indicates that lower values of the metric are better and should be minimized.
+
+ Example:
+     def custom_metric_function(y_true, y_pred):
+         return np.mean(np.abs(y_true - y_pred))
+
+     leap_binder.add_custom_metric(custom_metric_function, name='custom_metric', direction=MetricDirection.Downward)
+ """
  arg_names = inspect.getfullargspec(function)[0]
  self.setup_container.metrics.append(MetricHandler(name, function, arg_names, direction))

  def add_prediction(self, name: str, labels: List[str], channel_dim: int = -1) -> None:
+ """
+ Add prediction labels to the setup.
+
+ Args:
+     name (str): The name of the prediction.
+     labels (List[str]): The list of labels for the prediction.
+     channel_dim (int): The axis along which the prediction scores are located, default is -1.
+
+ The number of labels must match the size of the prediction output along channel_dim, i.e., len(labels) == output.shape[channel_dim].
+
+ Example:
+     leap_binder.add_prediction(name='class_labels', labels=['cat', 'dog'])
+ """
  self.setup_container.prediction_types.append(PredictionTypeHandler(name, labels, channel_dim))

  def set_ground_truth(self, function: SectionCallableInterface, name: str) -> None:
+ """
+ Set the ground truth handler function.
+
+ Args:
+     function: The ground truth handler function.
+         This function receives two parameters:
+             - subset: A `PreprocessResponse` object that contains the preprocessed data.
+             - index: The index of the sample within the subset.
+         This function should return:
+             - A numpy array representing the ground truth for the given sample.
+
+     name (str): The name of the ground truth section.
+
+ Example:
+     def ground_truth_handler(subset, index):
+         label = subset.data['labels'][index]
+         # Assuming labels are integers starting from 0
+         num_classes = 10  # Example number of classes
+         one_hot_label = np.zeros(num_classes)
+         one_hot_label[label] = 1
+         return one_hot_label
+
+     leap_binder.set_ground_truth(ground_truth_handler, name='ground_truth')
+ """
+
  function = to_numpy_return_wrapper(function)
  self.setup_container.ground_truths.append(GroundTruthHandler(name, function))

  self._encoder_names.append(name)

  def set_metadata(self, function: MetadataSectionCallableInterface, name: str) -> None:
+ """
+ Set the metadata handler function. This function is used for measuring and analyzing external variable values per sample, which is recommended for analysis within the Tensorleap platform.
+
+ Args:
+     function (MetadataSectionCallableInterface): The metadata handler function.
+         This function receives:
+             subset (PreprocessResponse): The subset of the data.
+             index (int): The index of the sample within the subset.
+         This function should return one of the following:
+             int: A single integer value.
+             Dict[str, int]: A dictionary with string keys and integer values.
+             str: A single string value.
+             Dict[str, str]: A dictionary with string keys and string values.
+             bool: A single boolean value.
+             Dict[str, bool]: A dictionary with string keys and boolean values.
+             float: A single float value.
+             Dict[str, float]: A dictionary with string keys and float values.
+
+     name (str): The name of the metadata section.
+
+ Example:
+     def metadata_handler_index(subset: PreprocessResponse, index: int) -> int:
+         return subset.data['metadata'][index]['id']
+
+     def metadata_handler_image_mean(subset: PreprocessResponse, index: int) -> float:
+         fpath = subset.data['images'][index]
+         image = load_image(fpath)
+         mean_value = np.mean(image)
+         return mean_value
+
+     leap_binder.set_metadata(metadata_handler_index, name='metadata_index')
+     leap_binder.set_metadata(metadata_handler_image_mean, name='metadata_image_mean')
+ """
  self.setup_container.metadata.append(MetadataHandler(name, function))

  def set_custom_layer(self, custom_layer: Type[Any], name: str, inspect_layer: bool = False,
  kernel_index: Optional[int] = None, use_custom_latent_space: bool = False) -> None:
+ """
+ Set a custom layer for the model.
+
+ Args:
+     custom_layer (Type[Any]): The custom layer class.
+     name (str): The name of the custom layer.
+     inspect_layer (bool): Whether to inspect the layer, default is False.
+     kernel_index (Optional[int]): The index of the kernel to inspect, if inspect_layer is True.
+     use_custom_latent_space (bool): Whether to use a custom latent space, default is False.
+
+ Example:
+     class CustomLayer:
+         def __init__(self, units: int):
+             self.units = units
+
+         def call(self, inputs):
+             return inputs * self.units
+
+     leap_binder.set_custom_layer(CustomLayer, name='custom_layer', inspect_layer=True, kernel_index=0)
+ """
  if inspect_layer and kernel_index is not None:
  custom_layer.kernel_index = kernel_index

@@ -30,7 +30,6 @@ class LeapLoader:
  @lru_cache()
  def exec_script(self) -> None:
  try:
- print("executing script")
  self.evaluate_module()
  except TypeError as e:
  import traceback
@@ -42,34 +41,25 @@ class LeapLoader:
  raise DatasetScriptException(getattr(e, 'message', repr(e))) from e

  def evaluate_module(self) -> None:
- print("evaluate_module")
-
  def append_path_recursively(full_path: str) -> None:
  if '/' not in full_path or full_path == '/':
  return

  parent_path = str(Path(full_path).parent)
- print(f"evaluate_module.append_path_recursively full_path: {full_path}, parent_path: {parent_path}")
  append_path_recursively(parent_path)
-
  sys.path.append(parent_path)

  file_path = Path(self.code_path, self.code_entry_name)
- print(f"evaluate_module: self.code_path: {self.code_path} self.code_entry_name: {self.code_entry_name}, "
- f"file_path for append_path_recursively: {str(file_path)}")
  append_path_recursively(str(file_path))

  spec = importlib.util.spec_from_file_location(self.code_path, file_path)
- print(f"evaluate_module: spec: {str(spec)}")
  if spec is None or spec.loader is None:
  raise DatasetScriptException(f'Something is went wrong with spec file from: {file_path}')

  file = importlib.util.module_from_spec(spec)
- print(f"evaluate_module: file module_from_spec: {str(file)}")
  if file is None:
  raise DatasetScriptException(f'Something is went wrong with import module from: {file_path}')

- print(f"evaluate_module: spec.loader.exec_module")
  spec.loader.exec_module(file)

  @lru_cache()
@@ -181,8 +171,7 @@ class LeapLoader:
  preprocess_response, test_result, dataset_base_handler)
  except Exception as e:
  line_number, file_name, stacktrace = get_root_exception_file_and_line_number()
- test_result[0].display[
- state_name] = f"{repr(e)} in file {file_name}, line_number: {line_number}\nStacktrace:\n{stacktrace}"
+ test_result[0].display[state_name] = f"{repr(e)} in file {file_name}, line_number: {line_number}\nStacktrace:\n{stacktrace}"
  test_result[0].is_passed = False

  result_payloads.extend(test_result)
@@ -354,8 +343,7 @@ class LeapLoader:
  if isinstance(handler_result, dict):
  for single_metadata_name, single_metadata_result in handler_result.items():
  handler_name = f'{handler.name}_{single_metadata_name}'
- result_agg[handler_name] = self._convert_metadata_to_correct_type(handler_name,
- single_metadata_result)
+ result_agg[handler_name] = self._convert_metadata_to_correct_type(handler_name, single_metadata_result)
  else:
  handler_name = handler.name
  result_agg[handler_name] = self._convert_metadata_to_correct_type(handler_name, handler_result)
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "code-loader"
- version = "1.0.41"
+ version = "1.0.42"
  description = ""
  authors = ["dorhar <doron.harnoy@tensorleap.ai>"]
  license = "MIT"