code-loader 0.2.84.dev40__tar.gz → 0.2.84.dev50__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/PKG-INFO +1 -2
  2. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/leap_loader_parallelized_base.py +8 -8
  3. code_loader-0.2.84.dev50/code_loader/metric_calculator_parallelized.py +53 -0
  4. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/samples_generator_parallelized.py +3 -4
  5. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/pyproject.toml +1 -2
  6. code_loader-0.2.84.dev40/code_loader/bla.py +0 -33
  7. code_loader-0.2.84.dev40/code_loader/metric_calculator_parallelized.py +0 -26
  8. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/LICENSE +0 -0
  9. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/README.md +0 -0
  10. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/__init__.py +0 -0
  11. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/contract/__init__.py +0 -0
  12. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/contract/datasetclasses.py +0 -0
  13. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/contract/enums.py +0 -0
  14. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/contract/exceptions.py +0 -0
  15. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/contract/responsedataclasses.py +0 -0
  16. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/contract/visualizer_classes.py +0 -0
  17. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/__init__.py +0 -0
  18. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/detection/__init__.py +0 -0
  19. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/detection/utils.py +0 -0
  20. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/detection/yolo/__init__.py +0 -0
  21. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/detection/yolo/decoder.py +0 -0
  22. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/detection/yolo/enums.py +0 -0
  23. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/detection/yolo/grid.py +0 -0
  24. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/detection/yolo/loss.py +0 -0
  25. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/detection/yolo/pytorch_utils.py +0 -0
  26. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/detection/yolo/utils.py +0 -0
  27. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/instancesegmentation/__init__.py +0 -0
  28. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/helpers/instancesegmentation/utils.py +0 -0
  29. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/leap_binder/__init__.py +0 -0
  30. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/leap_binder/leapbinder.py +0 -0
  31. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/leaploader.py +0 -0
  32. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/metrics/__init__.py +0 -0
  33. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/metrics/default_metrics.py +0 -0
  34. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/utils.py +0 -0
  35. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/visualizers/__init__.py +0 -0
  36. {code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/visualizers/default_visualizers.py +0 -0
{code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: code-loader
-Version: 0.2.84.dev40
+Version: 0.2.84.dev50
 Summary:
 Home-page: https://github.com/tensorleap/code-loader
 License: MIT
@@ -12,7 +12,6 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
-Requires-Dist: multiprocess (>=0.70.15,<0.71.0)
 Requires-Dist: numpy (>=1.22.3,<2.0.0)
 Requires-Dist: psutil (>=5.9.5,<6.0.0)
 Requires-Dist: tensorflow (>=2.11.0,<3.0.0) ; platform_machine == "x86_64"
{code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/leap_loader_parallelized_base.py
@@ -11,11 +11,12 @@ import psutil

 class LeapLoaderParallelizedBase(ABC):
     def __init__(self, code_path: str, code_entry_name: str,
-                 n_workers: Optional[int] = 2, max_ready_results_in_queue: int = 128, bb = "spawn") -> None:
-        self.bb = bb
-        self.mp = multiprocessing
-        if self.bb is not None:
-            self.mp = multiprocessing.get_context(self.bb)
+                 n_workers: Optional[int] = 2, max_ready_results_in_queue: int = 128,
+                 multiprocessing_context: Optional[str] = None) -> None:
+        self.multiprocessing_context = multiprocessing
+        if multiprocessing_context is not None:
+            self.multiprocessing_context = multiprocessing.get_context(multiprocessing_context)
+
         self.code_entry_name = code_entry_name
         self.code_path = code_path

@@ -50,9 +51,8 @@ class LeapLoaderParallelizedBase(ABC):

     @lru_cache()
     def start(self) -> None:
-        # multiprocessing.set_start_method(self.bb, force=True)
-        self._inputs_waiting_to_be_process = self.mp.Queue(5000)
-        self._ready_processed_results = self.mp.Queue(self.max_ready_results_in_queue)
+        self._inputs_waiting_to_be_process = self.multiprocessing_context.Queue(5000)
+        self._ready_processed_results = self.multiprocessing_context.Queue(self.max_ready_results_in_queue)

         self._run_and_warm_first_process()
         n_workers = self.n_workers
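
The hunks above replace the ad-hoc `bb` argument with an explicitly named `multiprocessing_context` parameter. A minimal sketch of the selection logic, assuming only the constructor shown in the diff; the standalone helper name below is illustrative and not part of code-loader:

# Illustrative sketch only; resolve_mp_context is not defined in code-loader.
import multiprocessing
from typing import Optional

def resolve_mp_context(multiprocessing_context: Optional[str] = None):
    # None keeps the module-level default start method (what SamplesGeneratorParallelized now uses);
    # a name such as "spawn" selects an explicit context (what MetricCalculatorParallelized passes).
    if multiprocessing_context is None:
        return multiprocessing
    return multiprocessing.get_context(multiprocessing_context)

default_queue = resolve_mp_context().Queue(5000)      # default context, as in start()
spawn_queue = resolve_mp_context("spawn").Queue(128)   # spawn context for the metric workers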
code_loader-0.2.84.dev50/code_loader/metric_calculator_parallelized.py (new file)
@@ -0,0 +1,53 @@
+# mypy: ignore-errors
+from typing import Optional, List, Tuple, Dict
+from multiprocessing import Process, Queue
+from code_loader.leap_loader_parallelized_base import LeapLoaderParallelizedBase
+import traceback
+from dataclasses import dataclass
+import tensorflow as tf
+from code_loader.leaploader import LeapLoader
+
+
+@dataclass
+class MetricSerializableError:
+    metric_id: str
+    metric_name: str
+    leap_script_trace: str
+    exception_as_str: str
+
+
+class MetricCalculatorParallelized(LeapLoaderParallelizedBase):
+    def __init__(self, code_path: str, code_entry_name: str, n_workers: Optional[int] = 2,
+                 max_samples_in_queue: int = 128) -> None:
+        super().__init__(code_path, code_entry_name, n_workers, max_samples_in_queue, "spawn")
+
+    @staticmethod
+    def _process_func(code_path: str, code_entry_name: str,
+                      metrics_to_process: Queue, ready_samples: Queue) -> None:
+        import os
+        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+        leap_loader = LeapLoader(code_path, code_entry_name)
+        while True:
+            metric_id, metric_name, input_arg_name_to_tensor = metrics_to_process.get(block=True)
+            try:
+                with tf.device('/cpu:0'):
+                    metric_result = leap_loader.metric_by_name()[metric_name].function(**input_arg_name_to_tensor)
+            except Exception as e:
+                leap_script_trace = traceback.format_exc().split('File "<string>"')[-1]
+                ready_samples.put(MetricSerializableError(metric_id, metric_name, leap_script_trace, str(e)))
+                continue
+
+            ready_samples.put((metric_id, metric_result))
+
+    def _create_and_start_process(self) -> Process:
+        process = self.multiprocessing_context.Process(
+            target=MetricCalculatorParallelized._process_func,
+            args=(self.code_path, self.code_entry_name, self._inputs_waiting_to_be_process,
+                  self._ready_processed_results))
+        process.daemon = True
+        process.start()
+        return process
+
+    def calculate_metrics(self, input_arg_name_to_tensor_list: List[Tuple[str, str, Dict[str, tf.Tensor]]]):
+        return self.start_process_inputs(input_arg_name_to_tensor_list)
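
Judging from the type hint on calculate_metrics, each work item is a (metric_id, metric_name, kwargs) tuple whose tensors are forwarded to the registered metric function. A hedged sketch of that shape; the id, metric name, and argument names below are hypothetical, not names defined by code-loader:

# Hypothetical example of the expected input shape only.
import tensorflow as tf

work_items = [
    (
        "metric-0001",        # metric_id, echoed back with the result
        "mean_error",         # metric name registered in the user's leap script
        {                     # keyword tensors forwarded as **input_arg_name_to_tensor
            "ground_truth": tf.constant([[1.0, 0.0]]),
            "prediction": tf.constant([[0.8, 0.2]]),
        },
    ),
]

# With a real dataset script, this list would be passed to
# MetricCalculatorParallelized(code_path, code_entry_name).calculate_metrics(work_items);
# each worker then emits either (metric_id, result) or a MetricSerializableError.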
{code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/code_loader/samples_generator_parallelized.py
@@ -2,9 +2,8 @@
 import traceback
 from dataclasses import dataclass

-from typing import List, Tuple, Optional, Dict
+from typing import List, Tuple, Optional

-import multiprocessing
 from multiprocessing import Process, Queue

 from code_loader.leap_loader_parallelized_base import LeapLoaderParallelizedBase
@@ -23,10 +22,10 @@ class SampleSerializableError:
 class SamplesGeneratorParallelized(LeapLoaderParallelizedBase):
     def __init__(self, code_path: str, code_entry_name: str, n_workers: Optional[int] = 2,
                  max_samples_in_queue: int = 128) -> None:
-        super().__init__(code_path, code_entry_name, n_workers, max_samples_in_queue, None)
+        super().__init__(code_path, code_entry_name, n_workers, max_samples_in_queue)

     def _create_and_start_process(self) -> Process:
-        process = self.mp.Process(
+        process = self.multiprocessing_context.Process(
             target=SamplesGeneratorParallelized._process_func,
             args=(self.code_path, self.code_entry_name, self._inputs_waiting_to_be_process,
                   self._ready_processed_results))
{code_loader-0.2.84.dev40 → code_loader-0.2.84.dev50}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "code-loader"
-version = "0.2.84.dev40"
+version = "0.2.84.dev50"
 description = ""
 authors = ["dorhar <doron.harnoy@tensorleap.ai>"]
 license = "MIT"
@@ -19,7 +19,6 @@ tensorflow-macos = {version = "^2.11.0", markers = "platform_machine == 'arm64'
 typeguard = "^2.13.3"
 psutil = "^5.9.5"
 torch = "1.12.1"
-multiprocess = "^0.70.15"

 [tool.poetry.dev-dependencies]
 pytest = "^7.1.1"
code_loader-0.2.84.dev40/code_loader/bla.py (deleted)
@@ -1,33 +0,0 @@
-# mypy: ignore-errors
-import traceback
-from dataclasses import dataclass
-from multiprocess import Queue
-import tensorflow as tf
-
-from code_loader.leaploader import LeapLoader
-
-
-@dataclass
-class MetricSerializableError:
-    metric_id: str
-    metric_name: str
-    leap_script_trace: str
-    exception_as_str: str
-
-
-def _process_func(code_path: str, code_entry_name: str,
-                  metrics_to_process: Queue, ready_samples: Queue) -> None:
-    # import os
-    # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
-    leap_loader = LeapLoader(code_path, code_entry_name)
-    while True:
-        metric_id, metric_name, input_arg_name_to_tensor = metrics_to_process.get(block=True)
-        try:
-            with tf.device('/cpu:0'):
-                metric_result = leap_loader.metric_by_name()[metric_name].function(**input_arg_name_to_tensor)
-        except Exception as e:
-            leap_script_trace = traceback.format_exc().split('File "<string>"')[-1]
-            ready_samples.put(MetricSerializableError(metric_id, metric_name, leap_script_trace, str(e)))
-            continue
-
-        ready_samples.put((metric_id, metric_result))
code_loader-0.2.84.dev40/code_loader/metric_calculator_parallelized.py (deleted)
@@ -1,26 +0,0 @@
-# mypy: ignore-errors
-import multiprocessing
-from typing import Optional, List, Tuple, Dict
-from multiprocessing import Process
-
-import tensorflow as tf
-
-from code_loader.bla import _process_func
-from code_loader.leap_loader_parallelized_base import LeapLoaderParallelizedBase
-
-class MetricCalculatorParallelized(LeapLoaderParallelizedBase):
-    def __init__(self, code_path: str, code_entry_name: str, n_workers: Optional[int] = 2,
-                 max_samples_in_queue: int = 128) -> None:
-        super().__init__(code_path, code_entry_name, n_workers, max_samples_in_queue)
-
-    def _create_and_start_process(self) -> Process:
-        process = self.mp.Process(
-            target=_process_func,
-            args=(self.code_path, self.code_entry_name, self._inputs_waiting_to_be_process,
-                  self._ready_processed_results))
-        process.daemon = True
-        process.start()
-        return process
-
-    def calculate_metrics(self, input_arg_name_to_tensor_list: List[Tuple[str, str, Dict[str, tf.Tensor]]]):
-        return self.start_process_inputs(input_arg_name_to_tensor_list)