code-loader 0.2.88__tar.gz → 0.2.89__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {code_loader-0.2.88 → code_loader-0.2.89}/PKG-INFO +1 -1
  2. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/leap_loader_parallelized_base.py +2 -2
  3. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/leaploader.py +1 -0
  4. code_loader-0.2.89/code_loader/visualizer_calculator_parallelized.py +63 -0
  5. {code_loader-0.2.88 → code_loader-0.2.89}/pyproject.toml +1 -1
  6. {code_loader-0.2.88 → code_loader-0.2.89}/LICENSE +0 -0
  7. {code_loader-0.2.88 → code_loader-0.2.89}/README.md +0 -0
  8. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/__init__.py +0 -0
  9. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/contract/__init__.py +0 -0
  10. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/contract/datasetclasses.py +0 -0
  11. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/contract/enums.py +0 -0
  12. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/contract/exceptions.py +0 -0
  13. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/contract/responsedataclasses.py +0 -0
  14. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/contract/visualizer_classes.py +0 -0
  15. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/__init__.py +0 -0
  16. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/detection/__init__.py +0 -0
  17. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/detection/utils.py +0 -0
  18. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/detection/yolo/__init__.py +0 -0
  19. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/detection/yolo/decoder.py +0 -0
  20. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/detection/yolo/enums.py +0 -0
  21. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/detection/yolo/grid.py +0 -0
  22. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/detection/yolo/loss.py +0 -0
  23. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/detection/yolo/pytorch_utils.py +0 -0
  24. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/detection/yolo/utils.py +0 -0
  25. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/instancesegmentation/__init__.py +0 -0
  26. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/helpers/instancesegmentation/utils.py +0 -0
  27. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/leap_binder/__init__.py +0 -0
  28. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/leap_binder/leapbinder.py +0 -0
  29. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/metric_calculator_parallelized.py +0 -0
  30. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/metrics/__init__.py +0 -0
  31. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/metrics/default_metrics.py +0 -0
  32. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/samples_generator_parallelized.py +0 -0
  33. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/utils.py +0 -0
  34. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/visualizers/__init__.py +0 -0
  35. {code_loader-0.2.88 → code_loader-0.2.89}/code_loader/visualizers/default_visualizers.py +0 -0
--- code_loader-0.2.88/PKG-INFO
+++ code_loader-0.2.89/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: code-loader
-Version: 0.2.88
+Version: 0.2.89
 Summary:
 Home-page: https://github.com/tensorleap/code-loader
 License: MIT
--- code_loader-0.2.88/code_loader/leap_loader_parallelized_base.py
+++ code_loader-0.2.89/code_loader/leap_loader_parallelized_base.py
@@ -32,13 +32,13 @@ class LeapLoaderParallelizedBase(ABC):
         self._generate_inputs_thread: Optional[Thread] = None
         self._should_stop_thread = False

-    def _calculate_n_workers_by_hardware(self) -> int:
+    def _calculate_n_workers_by_hardware(self) -> int:
         p = psutil.Process(self.processes[0].pid)
         memory_usage_in_bytes = p.memory_info().rss
         total_memory_in_bytes = psutil.virtual_memory().total

         n_workers = min(int(multiprocessing.cpu_count()),
-                        int(total_memory_in_bytes * 0.7 / memory_usage_in_bytes))
+                        int(total_memory_in_bytes * 0.5 / memory_usage_in_bytes))
         n_workers = max(n_workers, 1)
         return n_workers

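The functional change in this file lowers the memory headroom used when sizing the worker pool from 70% to 50% of total RAM. A small worked example of the formula, with purely illustrative numbers (16 CPUs, 32 GiB of RAM, a worker whose RSS is 4 GiB), shows the effect:

    # Illustrative inputs, not measured values.
    cpu_count = 16                          # multiprocessing.cpu_count()
    total_memory_in_bytes = 32 * 1024 ** 3  # psutil.virtual_memory().total
    memory_usage_in_bytes = 4 * 1024 ** 3   # RSS of the first worker process

    # 0.2.88: int(32 GiB * 0.7 / 4 GiB) = 5 workers; 0.2.89: int(32 GiB * 0.5 / 4 GiB) = 4 workers.
    n_workers = min(cpu_count, int(total_memory_in_bytes * 0.5 / memory_usage_in_bytes))
    n_workers = max(n_workers, 1)
    print(n_workers)  # 4

With the lower factor, the memory-based cap becomes the binding constraint a little earlier, trading some parallelism for more free RAM per worker.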
--- code_loader-0.2.88/code_loader/leaploader.py
+++ code_loader-0.2.89/code_loader/leaploader.py
@@ -327,6 +327,7 @@ class LeapLoader:

     @lru_cache()
     def _preprocess_result(self) -> Dict[DataStateEnum, PreprocessResponse]:
+        self.exec_script()
         preprocess = global_leap_binder.setup_container.preprocess
         # TODO: add caching of subset result
         assert preprocess is not None
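The one-line addition calls self.exec_script() before the binder's preprocess function is read, so the user's dataset script has populated global_leap_binder by the time _preprocess_result runs. A minimal, hypothetical analog of the pattern (simplified names, not the package's actual implementation):

    from functools import lru_cache

    class Loader:
        def exec_script(self):
            # Stand-in for executing the user's dataset script, which registers
            # preprocess/visualizer callbacks on a global binder object.
            print("executing user script")

        @lru_cache()
        def _preprocess_result(self):
            # Run the script before reading what it registered; lru_cache then
            # memoizes the computed result, so the script runs at most once per call path.
            self.exec_script()
            return {"training": "preprocess-response"}

    loader = Loader()
    loader._preprocess_result()  # executes the script, computes and caches the result
    loader._preprocess_result()  # served from the cache; exec_script is not called again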
--- /dev/null
+++ code_loader-0.2.89/code_loader/visualizer_calculator_parallelized.py
@@ -0,0 +1,63 @@
+# mypy: ignore-errors
+from typing import Optional, List, Tuple, Dict
+from multiprocessing import Process, Queue
+
+import numpy as np
+
+from code_loader.leap_loader_parallelized_base import LeapLoaderParallelizedBase
+from dataclasses import dataclass
+import tensorflow as tf
+from code_loader.leaploader import LeapLoader
+
+
+@dataclass
+class VisualizerSerializableError:
+    visualizer_id: str
+    visualizer_name: str
+    index_in_batch: int
+    exception_as_str: str
+
+
+class VisualizerCalculatorParallelized(LeapLoaderParallelizedBase):
+    def __init__(self, code_path: str, code_entry_name: str, n_workers: Optional[int] = 2,
+                 max_samples_in_queue: int = 128) -> None:
+        super().__init__(code_path, code_entry_name, n_workers, max_samples_in_queue, "spawn")
+
+    @staticmethod
+    def _process_func(code_path: str, code_entry_name: str,
+                      visualizers_to_process: Queue, ready_visualizations: Queue) -> None:
+        import os
+        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+        leap_loader = LeapLoader(code_path, code_entry_name)
+
+        # running preprocessing to sync preprocessing in main thread (can be valuable when preprocess is filling a
+        # global param that visualizer is using)
+        leap_loader._preprocess_result()
+        leap_loader._preprocess_result.cache_clear()
+
+        while True:
+            index_in_batch, visualizer_id, visualizer_name, input_arg_name_to_tensor = \
+                visualizers_to_process.get(block=True)
+            try:
+                with tf.device('/cpu:0'):
+                    visualizer_result = \
+                        leap_loader.visualizer_by_name()[visualizer_name].function(**input_arg_name_to_tensor)
+            except Exception as e:
+                ready_visualizations.put(VisualizerSerializableError(
+                    visualizer_id, visualizer_name, index_in_batch, str(e)))
+                continue
+
+            ready_visualizations.put((index_in_batch, visualizer_id, visualizer_result))
+
+    def _create_and_start_process(self) -> Process:
+        process = self.multiprocessing_context.Process(
+            target=VisualizerCalculatorParallelized._process_func,
+            args=(self.code_path, self.code_entry_name, self._inputs_waiting_to_be_process,
+                  self._ready_processed_results))
+        process.daemon = True
+        process.start()
+        return process
+
+    def calculate_visualizers(self, input_arg_name_to_tensor_list: List[Tuple[int, str, str, Dict[str, np.array]]]):
+        return self.start_process_inputs(input_arg_name_to_tensor_list)
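The new VisualizerCalculatorParallelized runs visualizer functions in spawned, CPU-only worker processes and reports failures back as VisualizerSerializableError records instead of letting them crash the pool. A hypothetical usage sketch, with made-up paths, visualizer names, and tensor shapes (the exact return shape of the inherited start_process_inputs is not shown in this diff):

    import numpy as np
    from code_loader.visualizer_calculator_parallelized import VisualizerCalculatorParallelized

    # Paths and names below are placeholders for a real Tensorleap dataset script.
    calculator = VisualizerCalculatorParallelized(
        code_path="/path/to/project", code_entry_name="leap_binder.py", n_workers=2)

    # Each work item is (index_in_batch, visualizer_id, visualizer_name, kwargs for the visualizer).
    work_items = [
        (0, "vis-0", "image_visualizer", {"data": np.zeros((64, 64, 3), dtype=np.float32)}),
        (1, "vis-1", "image_visualizer", {"data": np.ones((64, 64, 3), dtype=np.float32)}),
    ]

    # Results come back through the base class's queue machinery; items that raised inside a
    # worker arrive as VisualizerSerializableError instances rather than exceptions.
    results = calculator.calculate_visualizers(work_items)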
--- code_loader-0.2.88/pyproject.toml
+++ code_loader-0.2.89/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "code-loader"
-version = "0.2.88"
+version = "0.2.89"
 description = ""
 authors = ["dorhar <doron.harnoy@tensorleap.ai>"]
 license = "MIT"