code-loader 0.2.84.dev23__tar.gz → 0.2.84.dev31__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/PKG-INFO +1 -2
  2. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/leap_loader_parallelized_base.py +7 -9
  3. code_loader-0.2.84.dev31/code_loader/metric_calculator_parallelized.py +50 -0
  4. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/samples_generator_parallelized.py +3 -4
  5. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/pyproject.toml +1 -2
  6. code_loader-0.2.84.dev23/code_loader/bla.py +0 -33
  7. code_loader-0.2.84.dev23/code_loader/metric_calculator_parallelized.py +0 -26
  8. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/LICENSE +0 -0
  9. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/README.md +0 -0
  10. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/__init__.py +0 -0
  11. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/contract/__init__.py +0 -0
  12. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/contract/datasetclasses.py +0 -0
  13. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/contract/enums.py +0 -0
  14. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/contract/exceptions.py +0 -0
  15. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/contract/responsedataclasses.py +0 -0
  16. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/contract/visualizer_classes.py +0 -0
  17. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/__init__.py +0 -0
  18. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/detection/__init__.py +0 -0
  19. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/detection/utils.py +0 -0
  20. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/detection/yolo/__init__.py +0 -0
  21. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/detection/yolo/decoder.py +0 -0
  22. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/detection/yolo/enums.py +0 -0
  23. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/detection/yolo/grid.py +0 -0
  24. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/detection/yolo/loss.py +0 -0
  25. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/detection/yolo/pytorch_utils.py +0 -0
  26. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/detection/yolo/utils.py +0 -0
  27. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/instancesegmentation/__init__.py +0 -0
  28. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/helpers/instancesegmentation/utils.py +0 -0
  29. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/leap_binder/__init__.py +0 -0
  30. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/leap_binder/leapbinder.py +0 -0
  31. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/leaploader.py +0 -0
  32. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/metrics/__init__.py +0 -0
  33. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/metrics/default_metrics.py +0 -0
  34. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/utils.py +0 -0
  35. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/visualizers/__init__.py +0 -0
  36. {code_loader-0.2.84.dev23 → code_loader-0.2.84.dev31}/code_loader/visualizers/default_visualizers.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: code-loader
3
- Version: 0.2.84.dev23
3
+ Version: 0.2.84.dev31
4
4
  Summary:
5
5
  Home-page: https://github.com/tensorleap/code-loader
6
6
  License: MIT
@@ -12,7 +12,6 @@ Classifier: Programming Language :: Python :: 3
12
12
  Classifier: Programming Language :: Python :: 3.8
13
13
  Classifier: Programming Language :: Python :: 3.9
14
14
  Classifier: Programming Language :: Python :: 3.10
15
- Requires-Dist: multiprocess (>=0.70.15,<0.71.0)
16
15
  Requires-Dist: numpy (>=1.22.3,<2.0.0)
17
16
  Requires-Dist: psutil (>=5.9.5,<6.0.0)
18
17
  Requires-Dist: tensorflow (>=2.11.0,<3.0.0) ; platform_machine == "x86_64"
@@ -11,11 +11,10 @@ import psutil
11
11
 
12
12
  class LeapLoaderParallelizedBase(ABC):
13
13
  def __init__(self, code_path: str, code_entry_name: str,
14
- n_workers: Optional[int] = 2, max_ready_results_in_queue: int = 128, bb = "spawn") -> None:
15
- self.bb = bb
16
- self.mp = multiprocessing
17
- if self.bb is not None:
18
- self.mp = multiprocessing.get_context(self.bb)
14
+ n_workers: Optional[int] = 2, max_ready_results_in_queue: int = 128,
15
+ multiprocessing_context: Optional[str] = None) -> None:
16
+ self.multiprocessing_context = multiprocessing.get_context(multiprocessing_context)
17
+
19
18
  self.code_entry_name = code_entry_name
20
19
  self.code_path = code_path
21
20
 
@@ -36,7 +35,7 @@ class LeapLoaderParallelizedBase(ABC):
36
35
  memory_usage_in_bytes = p.memory_info().rss
37
36
  total_memory_in_bytes = psutil.virtual_memory().total
38
37
 
39
- n_workers = min(int(multiprocessing.cpu_count()),
38
+ n_workers = min(int(self.multiprocessing_context.cpu_count()),
40
39
  int(total_memory_in_bytes * 0.7 / memory_usage_in_bytes))
41
40
  n_workers = max(n_workers, 1)
42
41
  return n_workers
@@ -50,9 +49,8 @@ class LeapLoaderParallelizedBase(ABC):
50
49
 
51
50
  @lru_cache()
52
51
  def start(self) -> None:
53
- # multiprocessing.set_start_method(self.bb, force=True)
54
- self._inputs_waiting_to_be_process = self.mp.Queue(5000)
55
- self._ready_processed_results = self.mp.Queue(self.max_ready_results_in_queue)
52
+ self._inputs_waiting_to_be_process = self.multiprocessing_context.Queue(5000)
53
+ self._ready_processed_results = self.multiprocessing_context.Queue(self.max_ready_results_in_queue)
56
54
 
57
55
  self._run_and_warm_first_process()
58
56
  n_workers = self.n_workers
@@ -0,0 +1,50 @@
1
+ # mypy: ignore-errors
2
+ from typing import Optional, List, Tuple, Dict
3
+ from multiprocessing import Process, Queue
4
+ from code_loader.leap_loader_parallelized_base import LeapLoaderParallelizedBase
5
+ import traceback
6
+ from dataclasses import dataclass
7
+ import tensorflow as tf
8
+ from code_loader.leaploader import LeapLoader
9
+
10
+
11
+ @dataclass
12
+ class MetricSerializableError:
13
+ metric_id: str
14
+ metric_name: str
15
+ leap_script_trace: str
16
+ exception_as_str: str
17
+
18
+
19
+ class MetricCalculatorParallelized(LeapLoaderParallelizedBase):
20
+ def __init__(self, code_path: str, code_entry_name: str, n_workers: Optional[int] = 2,
21
+ max_samples_in_queue: int = 128) -> None:
22
+ super().__init__(code_path, code_entry_name, n_workers, max_samples_in_queue, "spawn")
23
+
24
+ @staticmethod
25
+ def _process_func(code_path: str, code_entry_name: str,
26
+ metrics_to_process: Queue, ready_samples: Queue) -> None:
27
+ leap_loader = LeapLoader(code_path, code_entry_name)
28
+ while True:
29
+ metric_id, metric_name, input_arg_name_to_tensor = metrics_to_process.get(block=True)
30
+ try:
31
+ with tf.device('/cpu:0'):
32
+ metric_result = leap_loader.metric_by_name()[metric_name].function(**input_arg_name_to_tensor)
33
+ except Exception as e:
34
+ leap_script_trace = traceback.format_exc().split('File "<string>"')[-1]
35
+ ready_samples.put(MetricSerializableError(metric_id, metric_name, leap_script_trace, str(e)))
36
+ continue
37
+
38
+ ready_samples.put((metric_id, metric_result))
39
+
40
+ def _create_and_start_process(self) -> Process:
41
+ process = self.multiprocessing_context.Process(
42
+ target=MetricCalculatorParallelized._process_func,
43
+ args=(self.code_path, self.code_entry_name, self._inputs_waiting_to_be_process,
44
+ self._ready_processed_results))
45
+ process.daemon = True
46
+ process.start()
47
+ return process
48
+
49
+ def calculate_metrics(self, input_arg_name_to_tensor_list: List[Tuple[str, str, Dict[str, tf.Tensor]]]):
50
+ return self.start_process_inputs(input_arg_name_to_tensor_list)
@@ -2,9 +2,8 @@
2
2
  import traceback
3
3
  from dataclasses import dataclass
4
4
 
5
- from typing import List, Tuple, Optional, Dict
5
+ from typing import List, Tuple, Optional
6
6
 
7
- import multiprocessing
8
7
  from multiprocessing import Process, Queue
9
8
 
10
9
  from code_loader.leap_loader_parallelized_base import LeapLoaderParallelizedBase
@@ -23,10 +22,10 @@ class SampleSerializableError:
23
22
  class SamplesGeneratorParallelized(LeapLoaderParallelizedBase):
24
23
  def __init__(self, code_path: str, code_entry_name: str, n_workers: Optional[int] = 2,
25
24
  max_samples_in_queue: int = 128) -> None:
26
- super().__init__(code_path, code_entry_name, n_workers, max_samples_in_queue, None)
25
+ super().__init__(code_path, code_entry_name, n_workers, max_samples_in_queue)
27
26
 
28
27
  def _create_and_start_process(self) -> Process:
29
- process = self.mp.Process(
28
+ process = self.multiprocessing_context.Process(
30
29
  target=SamplesGeneratorParallelized._process_func,
31
30
  args=(self.code_path, self.code_entry_name, self._inputs_waiting_to_be_process,
32
31
  self._ready_processed_results))
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "code-loader"
3
- version = "0.2.84.dev23"
3
+ version = "0.2.84.dev31"
4
4
  description = ""
5
5
  authors = ["dorhar <doron.harnoy@tensorleap.ai>"]
6
6
  license = "MIT"
@@ -19,7 +19,6 @@ tensorflow-macos = {version = "^2.11.0", markers = "platform_machine == 'arm64'
19
19
  typeguard = "^2.13.3"
20
20
  psutil = "^5.9.5"
21
21
  torch = "1.12.1"
22
- multiprocess = "^0.70.15"
23
22
 
24
23
  [tool.poetry.dev-dependencies]
25
24
  pytest = "^7.1.1"
@@ -1,33 +0,0 @@
1
- # mypy: ignore-errors
2
- import traceback
3
- from dataclasses import dataclass
4
- from multiprocess import Queue
5
- import tensorflow as tf
6
-
7
- from code_loader.leaploader import LeapLoader
8
-
9
-
10
- @dataclass
11
- class MetricSerializableError:
12
- metric_id: str
13
- metric_name: str
14
- leap_script_trace: str
15
- exception_as_str: str
16
-
17
-
18
- def _process_func(code_path: str, code_entry_name: str,
19
- metrics_to_process: Queue, ready_samples: Queue) -> None:
20
- import os
21
- os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
22
- leap_loader = LeapLoader(code_path, code_entry_name)
23
- while True:
24
- metric_id, metric_name, input_arg_name_to_tensor = metrics_to_process.get(block=True)
25
- try:
26
- with tf.device('/cpu:0'):
27
- metric_result = leap_loader.metric_by_name()[metric_name].function(**input_arg_name_to_tensor)
28
- except Exception as e:
29
- leap_script_trace = traceback.format_exc().split('File "<string>"')[-1]
30
- ready_samples.put(MetricSerializableError(metric_id, metric_name, leap_script_trace, str(e)))
31
- continue
32
-
33
- ready_samples.put((metric_id, metric_result))
@@ -1,26 +0,0 @@
1
- # mypy: ignore-errors
2
- import multiprocessing
3
- from typing import Optional, List, Tuple, Dict
4
- from multiprocessing import Process
5
-
6
- import tensorflow as tf
7
-
8
- from code_loader.bla import _process_func
9
- from code_loader.leap_loader_parallelized_base import LeapLoaderParallelizedBase
10
-
11
- class MetricCalculatorParallelized(LeapLoaderParallelizedBase):
12
- def __init__(self, code_path: str, code_entry_name: str, n_workers: Optional[int] = 2,
13
- max_samples_in_queue: int = 128) -> None:
14
- super().__init__(code_path, code_entry_name, n_workers, max_samples_in_queue)
15
-
16
- def _create_and_start_process(self) -> Process:
17
- process = self.mp.Process(
18
- target=_process_func,
19
- args=(self.code_path, self.code_entry_name, self._inputs_waiting_to_be_process,
20
- self._ready_processed_results))
21
- process.daemon = True
22
- process.start()
23
- return process
24
-
25
- def calculate_metrics(self, input_arg_name_to_tensor_list: List[Tuple[str, str, Dict[str, tf.Tensor]]]):
26
- return self.start_process_inputs(input_arg_name_to_tensor_list)