sinapsis-data-readers 0.1.14__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. sinapsis_data_readers/helpers/sktime_datasets_subset.py +1 -4
  2. sinapsis_data_readers/templates/audio_readers/audio_reader_pydub.py +4 -2
  3. sinapsis_data_readers/templates/audio_readers/audio_reader_soundfile.py +1 -2
  4. sinapsis_data_readers/templates/audio_readers/audio_reader_to_bytes.py +4 -4
  5. sinapsis_data_readers/templates/audio_readers/base_audio_reader.py +8 -0
  6. sinapsis_data_readers/templates/base_file_data_loader.py +5 -1
  7. sinapsis_data_readers/templates/datasets_readers/csv_datasets.py +10 -6
  8. sinapsis_data_readers/templates/datasets_readers/sklearn_datasets.py +2 -2
  9. sinapsis_data_readers/templates/datasets_readers/sktime_datasets.py +3 -2
  10. sinapsis_data_readers/templates/image_readers/base_image_folder_data_loader.py +2 -1
  11. sinapsis_data_readers/templates/image_readers/coco_dataset_reader.py +2 -1
  12. sinapsis_data_readers/templates/image_readers/csv_dataset_reader.py +6 -4
  13. sinapsis_data_readers/templates/image_readers/image_folder_reader_cv2.py +0 -1
  14. sinapsis_data_readers/templates/video_readers/base_video_reader.py +3 -1
  15. sinapsis_data_readers/templates/video_readers/video_reader_cv2.py +4 -2
  16. sinapsis_data_readers/templates/video_readers/video_reader_dali.py +4 -4
  17. sinapsis_data_readers/templates/video_readers/video_reader_ffmpeg.py +5 -3
  18. sinapsis_data_readers/templates/video_readers/video_reader_torchcodec.py +4 -1
  19. {sinapsis_data_readers-0.1.14.dist-info → sinapsis_data_readers-0.1.17.dist-info}/METADATA +1 -1
  20. {sinapsis_data_readers-0.1.14.dist-info → sinapsis_data_readers-0.1.17.dist-info}/RECORD +23 -24
  21. sinapsis_data_readers/helpers/image_color_space_converter.py +0 -53
  22. {sinapsis_data_readers-0.1.14.dist-info → sinapsis_data_readers-0.1.17.dist-info}/WHEEL +0 -0
  23. {sinapsis_data_readers-0.1.14.dist-info → sinapsis_data_readers-0.1.17.dist-info}/licenses/LICENSE +0 -0
  24. {sinapsis_data_readers-0.1.14.dist-info → sinapsis_data_readers-0.1.17.dist-info}/top_level.txt +0 -0
sinapsis_data_readers/helpers/sktime_datasets_subset.py

@@ -1,12 +1,9 @@
  # -*- coding: utf-8 -*-
- """ Excluded sktime loaders"""
  from typing import Callable

  from sktime import datasets

- class_datasets = [
- "Airline",
- "Longley",
+ class_datasets = ["Airline","Longley",
  "Lynx",
  "Macroeconomic",
  "ShampooSales",
sinapsis_data_readers/templates/audio_readers/audio_reader_pydub.py

@@ -62,7 +62,7 @@ class AudioReaderPydub(_AudioBaseReader):
  """

  sample_rate_khz: int = 16
- from_bytes: bool = True
+ from_bytes: bool = False
  audio_reader_format: Literal["wav", "raw", "pcm"] | None = None

  UIProperties = AudioReaderPydubUIProperties
@@ -84,7 +84,9 @@ class AudioReaderPydub(_AudioBaseReader):
  audio_segment = AudioSegment.from_file(io.BytesIO(audio_bytes))

  else:
- audio_file_path: str = cast(str, self.attributes.audio_file_path)
+
+ audio_file_path = self.get_full_path()
+ #audio_file_path = os.path.join(self.attributes.root_dir, audio_file_path)
  if os.path.exists(audio_file_path):
  audio_segment = AudioSegment.from_file(audio_file_path, format=self.attributes.audio_reader_format)

sinapsis_data_readers/templates/audio_readers/audio_reader_soundfile.py

@@ -1,7 +1,6 @@
  # -*- coding: utf-8 -*-

  import os
- from typing import cast

  import soundfile as sf
  from sinapsis_core.data_containers.data_packet import AudioPacket, DataContainer
@@ -60,7 +59,7 @@ class AudioReaderSoundfile(_AudioBaseReader):
  AudioPacket|None: An AudioPacket containing the audio data and
  sample rate, or None if the file could not be read or was invalid.
  """
- audio_path = cast(str, self.attributes.audio_file_path)
+ audio_path = self.get_full_path()
  if os.path.exists(audio_path):
  try:
  audio_content, sample_rate = sf.read(audio_path)
sinapsis_data_readers/templates/audio_readers/audio_reader_to_bytes.py

@@ -38,19 +38,19 @@ class AudioReaderToBytes(_AudioBaseReader):
  FileNotFoundError: If the specified audio file does not exist.
  IOError: If there is an error reading the audio file.
  """
-
+ full_path = self.get_full_path()
  try:
- with open(self.attributes.audio_file_path, "rb") as audio_file:
+ with open(full_path, "rb") as audio_file:
  audio_content = audio_file.read()

  audio_file.close()
  audio_packet = AudioPacket(
- source=self.attributes.audio_file_path,
+ source=full_path,
  content=audio_content,
  )
  return audio_packet
  except FileNotFoundError:
- self.logger.error(f"Audio file not found: {self.attributes.audio_file_path}")
+ self.logger.error(f"Audio file not found: {full_path}")
  return None
  except IOError as e:
  self.logger.error(f"Error reading audio file: {e}")
sinapsis_data_readers/templates/audio_readers/base_audio_reader.py

@@ -1,6 +1,8 @@
  # -*- coding: utf-8 -*-

  import abc
+ import os
+ from typing import cast
  from uuid import uuid4

  from sinapsis_core.data_containers.data_packet import AudioPacket, DataContainer
@@ -28,6 +30,8 @@ class _AudioBaseReader(_BaseDataReader):
  The source identifier, defaults to "streamlit".
  """

+
+ root_dir: str | None = None
  audio_file_path: str
  source: str = str(uuid4())

@@ -42,6 +46,10 @@ class _AudioBaseReader(_BaseDataReader):
  Returns:
  AudioPacket: The audio data wrapped in an AudioPacket.
  """
+ def get_full_path(self):
+ audio_file_path = cast(str, self.attributes.audio_file_path)
+ full_path = os.path.join(self.attributes.root_dir, audio_file_path)
+ return full_path

  def has_elements(self) -> bool:
  """Flag to indicate if there is still content to process"""
sinapsis_data_readers/templates/base_file_data_loader.py

@@ -8,6 +8,7 @@ from sinapsis_core.template_base.base_models import (
  TemplateAttributes,
  TemplateAttributeType,
  )
+ from sinapsis_core.utils.env_var_keys import SINAPSIS_CACHE_DIR


  def base_documentation() -> str:
@@ -80,7 +81,7 @@ class _BaseDataReader(Template, abc.ABC):
  __doc__ = f"""
  {base_attributes_documentation()}
  """
-
+ root_dir : str | None = None
  data_dir: str
  pattern: str = "**/*"
  batch_size: int = 1
@@ -93,8 +94,10 @@ class _BaseDataReader(Template, abc.ABC):
  def __init__(self, attributes: TemplateAttributeType) -> None:
  super().__init__(attributes)
  self.counter = 0
+ self.attributes.root_dir = self.attributes.root_dir or SINAPSIS_CACHE_DIR
  self.data_collection = self.make_data_entries()

+
  @abc.abstractmethod
  def make_data_entries(self) -> list[Packet]:
  """
@@ -181,4 +184,5 @@ class _BaseDataReader(Template, abc.ABC):
  self.append_packets_to_container(container)
  else:
  self.logger.debug(f"{self.class_name} has no more data to load.")
+
  return container
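The __init__ change above makes root_dir default to SINAPSIS_CACHE_DIR whenever it is left unset. A standalone sketch of the `or` fallback, with an assumed placeholder value for the cache directory:

SINAPSIS_CACHE_DIR = "/home/user/.cache/sinapsis"   # assumed placeholder value

def resolve_root_dir(root_dir: str | None) -> str:
    # None (or an empty string) falls back to the cache directory; any other value wins.
    return root_dir or SINAPSIS_CACHE_DIR

print(resolve_root_dir(None))          # /home/user/.cache/sinapsis
print(resolve_root_dir("/datasets"))   # /datasets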
sinapsis_data_readers/templates/datasets_readers/csv_datasets.py

@@ -1,26 +1,30 @@
  # -*- coding: utf-8 -*-
- from sinapsis_core.data_containers.data_packet import DataContainer, TextPacket, TimeSeriesPacket
+ import os
+
+ from sinapsis_core.data_containers.data_packet import DataContainer, TimeSeriesPacket
  from sinapsis_core.template_base.base_models import TemplateAttributes, TemplateAttributeType
  from sinapsis_core.template_base.template import Template
+ from sinapsis_core.utils.env_var_keys import SINAPSIS_CACHE_DIR

  from sinapsis_data_readers.helpers.csv_reader import read_file


  class CSVDatasetReader(Template):
  class AttributesBaseModel(TemplateAttributes):
+ root_dir : str | None = None
  path_to_csv: str
  store_as_time_series: bool = False
- store_as_text_packet: bool = True

  def __init__(self, attributes: TemplateAttributeType) -> None:
  super().__init__(attributes)
- self.csv_file = read_file(self.attributes.path_to_csv)
+ self.attributes.root_dir = self.attributes.root_dir or SINAPSIS_CACHE_DIR
+ self.csv_file = read_file(os.path.join(self.attributes.root_dir, self.attributes.path_to_csv))

  def execute(self, container: DataContainer) -> DataContainer:
  if self.attributes.store_as_time_series:
  packet = TimeSeriesPacket(content=self.csv_file)
  container.time_series.append(packet)
- if self.attributes.store_as_text_packet:
- packet = TextPacket(content=self.csv_file)
- container.texts.append(packet)
+ else:
+ self._set_generic_data(container, self.csv_file)
+
  return container
sinapsis_data_readers/templates/datasets_readers/sklearn_datasets.py

@@ -135,7 +135,7 @@ class SKLearnDatasets(BaseDynamicWrapperTemplate):
  @staticmethod
  def split_dataset(
  results: pd.DataFrame, feature_name_cols: list, target_name_cols: list, n_features: int, split_size: float
- ) -> TabularDatasetSplit:
+ ) -> dict:
  """Method to split the dataset into training and testing samples"""
  if feature_name_cols:
  X = results[feature_name_cols]
@@ -154,7 +154,7 @@ class SKLearnDatasets(BaseDynamicWrapperTemplate):
  y_test=pd.DataFrame(y_test),
  )

- return split_data
+ return split_data.model_dump()

  def execute(self, container: DataContainer) -> DataContainer:
  sklearn_dataset = self.wrapped_callable.__func__(**self.dataset_attributes.model_dump())
sinapsis_data_readers/templates/datasets_readers/sktime_datasets.py

@@ -94,7 +94,7 @@ class SKTimeDatasets(BaseDynamicWrapperTemplate):

  def initialize_attributes(self):
  return getattr(self.attributes, self.wrapped_callable.__name__)
- def split_time_series_dataset(self, dataset: Any) -> TabularDatasetSplit:
+ def split_time_series_dataset(self, dataset: Any) -> dict:
  """Split a time series dataset into training and testing sets

  Args:
@@ -104,12 +104,13 @@ class SKTimeDatasets(BaseDynamicWrapperTemplate):
  TabularDatasetSplit: Object containing the split time series data
  """
  y_train, y_test = temporal_train_test_split(dataset, train_size=self.attributes.train_size)
- return TabularDatasetSplit(
+ split_data = TabularDatasetSplit(
  x_train=pd.DataFrame(index=y_train.index),
  x_test=pd.DataFrame(index=y_test.index),
  y_train=pd.DataFrame(y_train),
  y_test=pd.DataFrame(y_test),
  )
+ return split_data.model_dump()

  def split_classification_dataset(self, X: Any, y: Any) -> TabularDatasetSplit:
  """Split a classification dataset into training and testing sets
sinapsis_data_readers/templates/image_readers/base_image_folder_data_loader.py

@@ -115,9 +115,10 @@ class ImageBaseDataReader(_BaseDataReader, abc.ABC):
  Returns:
  list[str| path]: the path as string or Path object
  """
+ full_path = os.path.join(self.attributes.root_dir, self.attributes.data_dir)
  data_items = [
  img_path if not return_as_str else str(img_path.resolve())
- for img_path in Path(self.attributes.data_dir).glob(self.attributes.pattern)
+ for img_path in Path(full_path).glob(self.attributes.pattern)
  if img_path.suffix.lower() in SUPPORTED_IMAGE_TYPES
  ]

sinapsis_data_readers/templates/image_readers/coco_dataset_reader.py

@@ -16,6 +16,7 @@ from sinapsis_core.data_containers.annotations import (
  )
  from sinapsis_core.data_containers.data_packet import ImagePacket
  from sinapsis_core.template_base.base_models import TemplateAttributeType
+ from sinapsis_core.utils.env_var_keys import SINAPSIS_CACHE_DIR

  from sinapsis_data_readers.helpers.coco_dataclasses import (
  CocoAnnotationsKeys,
@@ -54,7 +55,7 @@ class CocoImageDatasetBaseCV2(FolderImageDatasetCV2):
  annotations_path: str

  def __init__(self, attributes: TemplateAttributeType) -> None:
- self.annotations_file = os.path.join(attributes.get("data_dir"), attributes.get("annotations_path"))
+ self.annotations_file = os.path.join(attributes.get("root_dir", SINAPSIS_CACHE_DIR), attributes.get("data_dir"), attributes.get("annotations_path"))
  self.raw_annotations_dict: list[dict[str, dict[str, Any]]] = self.read_annotations_file(self.annotations_file)
  self.annotations = self.images_annotations()
  super().__init__(attributes)
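One behavioral detail in the changed line above: attributes.get("root_dir", SINAPSIS_CACHE_DIR) only applies the default when the key is absent, unlike the `root_dir or SINAPSIS_CACHE_DIR` pattern used in the reader __init__ methods, which also covers an explicit None. A small standalone illustration of the difference (placeholder value assumed, not package code):

SINAPSIS_CACHE_DIR = "/home/user/.cache/sinapsis"     # assumed placeholder value

attrs = {"root_dir": None, "data_dir": "coco", "annotations_path": "annotations.json"}
print(attrs.get("root_dir", SINAPSIS_CACHE_DIR))      # None -- key present, default not applied
print(attrs.get("root_dir") or SINAPSIS_CACHE_DIR)    # /home/user/.cache/sinapsis

attrs.pop("root_dir")
print(attrs.get("root_dir", SINAPSIS_CACHE_DIR))      # /home/user/.cache/sinapsis -- key absent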
sinapsis_data_readers/templates/image_readers/csv_dataset_reader.py

@@ -1,6 +1,7 @@
  # -*- coding: utf-8 -*-

- from typing import cast
+
+ import os

  import numpy as np
  from sinapsis_core.data_containers.annotations import ImageAnnotations
@@ -10,6 +11,7 @@ from sinapsis_core.template_base.base_models import (
  TemplateAttributeType,
  UIPropertiesMetadata,
  )
+ from sinapsis_core.utils.env_var_keys import SINAPSIS_CACHE_DIR

  from sinapsis_data_readers.helpers.csv_reader import read_file
  from sinapsis_data_readers.helpers.tags import Tags
@@ -75,11 +77,11 @@ class CSVImageDataset(_BaseDataReader):
  """
  # Ensure 'data_dir' is available and convert it to string if it's not None
  data_dir = getattr(self.attributes, "data_dir", None)
-
+ root_dir = getattr(self.attributes, "root_dir", SINAPSIS_CACHE_DIR)
  if data_dir is None:
  raise ValueError("The 'data_dir' attribute cannot be None.")
-
- self.data_points = read_file(cast(str, data_dir))
+ full_path = os.path.join(root_dir, data_dir)
+ self.data_points = read_file(full_path)
  super().__init__(attributes)

  def read_packet_content(self, packet: ImagePacket) -> None:
sinapsis_data_readers/templates/image_readers/image_folder_reader_cv2.py

@@ -21,7 +21,6 @@ def read_image_file(file_path: str | PosixPath | bytes) -> np.ndarray:
  np.ndarray: the image as a numpy array
  """
  np_image: np.ndarray
-
  if isinstance(file_path, bytes):
  image_arr = np.frombuffer(file_path, np.uint8)
  np_image = cv2.imdecode(image_arr, cv2.IMREAD_COLOR)
sinapsis_data_readers/templates/video_readers/base_video_reader.py

@@ -22,6 +22,7 @@ from sinapsis_core.template_base.base_models import (
  TemplateAttributeType,
  UIPropertiesMetadata,
  )
+ from sinapsis_core.utils.env_var_keys import SINAPSIS_CACHE_DIR

  from sinapsis_data_readers.helpers.file_path_helpers import parse_file_paths
  from sinapsis_data_readers.helpers.tags import Tags
@@ -40,7 +41,7 @@ class BaseVideoReaderAttributes(TemplateAttributes):
  device (Literal["cpu", "gpu"]): Device to be used for loading the video. Default is "cpu".
  loop_forever (bool): Whether to loop the video indefinitely. Default is False.
  """
-
+ root_dir: str | None = None
  video_file_path: str | list[str]
  batch_size: int = 1
  video_source: int | str | None = str(get_uuid())
@@ -59,6 +60,7 @@ class BaseVideoReader(Template):

  def __init__(self, attributes: TemplateAttributeType) -> None:
  super().__init__(attributes)
+ self.attributes.root_dir = self.attributes.root_dir or SINAPSIS_CACHE_DIR
  self.frame_count = 0
  self.video_reader: Any
  self.total_frames: int
sinapsis_data_readers/templates/video_readers/video_reader_cv2.py

@@ -1,6 +1,8 @@
  # -*- coding: utf-8 -*-


+ import os
+
  import cv2
  from sinapsis_core.data_containers.data_packet import ImagePacket

@@ -55,8 +57,8 @@ class VideoReaderCV2(BaseVideoReader):
  - If successful, returns (video_reader, num_frames),
  where video_reader is the OpenCV VideoCapture object.
  """
-
- video_reader = cv2.VideoCapture(self.attributes.video_file_path)
+ full_path = os.path.join(self.attributes.root_dir, self.attributes.video_file_path)
+ video_reader = cv2.VideoCapture(full_path)
  num_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))

  if not video_reader.isOpened():
sinapsis_data_readers/templates/video_readers/video_reader_dali.py

@@ -1,6 +1,5 @@
  # -*- coding: utf-8 -*-
-
-
+ import os.path
  from typing import Literal, cast

  import nvidia.dali.fn as fn
@@ -111,13 +110,13 @@ class VideoReaderDali(BaseVideoReader):
  and the number of frames per epoch. If the pipeline cannot be created,
  it returns None and 0.
  """
-
+ full_path = os.path.join(self.attributes.root_dir, self.attributes.video_file_path)
  try:
  pipe: Pipeline = video_pipe(
  batch_size=self.attributes.batch_size,
  num_threads=self.attributes.num_threads,
  device_id=0,
- filenames=self.attributes.video_file_path,
+ filenames=full_path,
  seed=12345,
  device=self.attributes.device,
  random_shuffle=self.attributes.random_shuffle,
@@ -154,6 +153,7 @@ class VideoReaderDali(BaseVideoReader):
  _ = template_name
  if self.attributes.device == "gpu":
  torch.cuda.empty_cache()
+ torch.cuda.ipc_collect()
  super().reset_state(template_name)
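For context on the torch.cuda.ipc_collect() call added to the reset path above: a standalone sketch (not package code) of the same GPU teardown pattern, guarded with an availability check purely for illustration:

import torch

def release_gpu_memory() -> None:
    # Free memory held by the caching allocator and by CUDA IPC handles.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()   # return cached, unoccupied blocks to the driver
        torch.cuda.ipc_collect()   # reclaim memory from released CUDA IPC handles

release_gpu_memory()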
sinapsis_data_readers/templates/video_readers/video_reader_ffmpeg.py

@@ -1,5 +1,5 @@
  # -*- coding: utf-8 -*-
-
+ import os.path
  import subprocess

  import ffmpeg
@@ -54,8 +54,9 @@ class VideoReaderFFMPEG(BaseVideoReader):
  Returns:
  tuple[int, int, int]: the values for height, width and frames as integers
  """
+ full_path = os.path.join(self.attributes.root_dir, self.attributes.video_file_path)
  try:
- probe = ffmpeg.probe(self.attributes.video_file_path)
+ probe = ffmpeg.probe(full_path)
  except ffmpeg.Error as e:
  self.logger.warning("ffmpeg error: %s", str(e))
  return (0, 0, 0)
@@ -72,8 +73,9 @@ class VideoReaderFFMPEG(BaseVideoReader):

  def make_video_reader(self) -> tuple[subprocess.Popen, int] | NotSetType:
  """This method asynchronously runs a subprocess to stream the video frames"""
+ full_path = os.path.join(self.attributes.root_dir, self.attributes.video_file_path)
  video_reader = (
- ffmpeg.input(self.attributes.video_file_path)
+ ffmpeg.input(full_path)
  .output(
  "pipe:",
  format="rawvideo",
sinapsis_data_readers/templates/video_readers/video_reader_torchcodec.py

@@ -1,4 +1,6 @@
  # -*- coding: utf-8 -*-
+ import os.path
+
  import torch
  from sinapsis_core.data_containers.data_packet import ImagePacket
  from torchcodec.decoders import SimpleVideoDecoder
@@ -56,8 +58,9 @@ class VideoReaderTorchCodec(BaseVideoReader):
  Raises:
  ValueError: If there is an issue decoding the video file.
  """
+ full_path = os.path.join(self.attributes.root_dir, self.attributes.video_file_path)
  try:
- video_reader = SimpleVideoDecoder(self.attributes.video_file_path)
+ video_reader = SimpleVideoDecoder(full_path)
  except ValueError as e:
  self.logger.warning(f"Was unable to decode video file: {e}")
  return NotSet
{sinapsis_data_readers-0.1.14.dist-info → sinapsis_data_readers-0.1.17.dist-info}/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sinapsis-data-readers
- Version: 0.1.14
+ Version: 0.1.17
  Summary: Templates to read data in different formats
  Author-email: SinapsisAI <dev@sinapsis.tech>
  Project-URL: Homepage, https://sinapsis.tech
{sinapsis_data_readers-0.1.14.dist-info → sinapsis_data_readers-0.1.17.dist-info}/RECORD

@@ -4,39 +4,38 @@ sinapsis_data_readers/helpers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5N
  sinapsis_data_readers/helpers/coco_dataclasses.py,sha256=D5HVWQP95TdHpa9UnTYAClfaqsIYrODKDGbITCvYXsc,2290
  sinapsis_data_readers/helpers/csv_reader.py,sha256=f_fk2Wgukdh93Um1Q5qczUD27iC3A71vKbhWxXe6Fyk,558
  sinapsis_data_readers/helpers/file_path_helpers.py,sha256=ayuFe-AAEa4immcY19FcubAtKzZ3BtYkBus-QP5dADo,2205
- sinapsis_data_readers/helpers/image_color_space_converter.py,sha256=SABsol7jp6veA_T13MSr0fQFVrI-NJdXkNYyX4YL90E,2099
  sinapsis_data_readers/helpers/sklearn_dataset_subset.py,sha256=XpzdVTBr5OgG57oOz3W7eLpoj2vWlTKbX6NIH0OP3qc,792
- sinapsis_data_readers/helpers/sktime_datasets_subset.py,sha256=vu3CKY6QW6SwaDZxdVoaI-vvWdfNZD8XN0xiZsOi4cM,430
+ sinapsis_data_readers/helpers/sktime_datasets_subset.py,sha256=aMmsaPuHuWkEP5BCAzeP6vV--y0UehBZL_fRHlDJweA,397
  sinapsis_data_readers/helpers/tags.py,sha256=YeddHmiX9kq2wDUhQ6-elIDNxt3ojBs8oHQugzyjM3s,620
  sinapsis_data_readers/helpers/text_input_helpers.py,sha256=XKr9AZkK2Ro6s8qxjdrqmWdKFdJ-aql18-e7X90MAdo,576
  sinapsis_data_readers/templates/__init__.py,sha256=qAYD770roN5DAW-tN6e_j5Py_peDHVUmZaQDyWodUlI,3196
- sinapsis_data_readers/templates/base_file_data_loader.py,sha256=zWcEjIRGCGlsGz1VMK6OzheWaE9C0itACAz3FSc7OiE,6992
+ sinapsis_data_readers/templates/base_file_data_loader.py,sha256=DwMWHKlMr6eqrhm8-8q22-dZSW4N2IM7Tef7YsjVdKI,7176
  sinapsis_data_readers/templates/audio_readers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- sinapsis_data_readers/templates/audio_readers/audio_reader_pydub.py,sha256=RH2SbuGlWui6TT65BOiqKe5RXBLSZtL5L-FHl3ezNHk,6157
- sinapsis_data_readers/templates/audio_readers/audio_reader_soundfile.py,sha256=YjOxb0CpJPjnQUXBsNV34-xA5F19mbkGaX6mJ_TJwm4,5278
- sinapsis_data_readers/templates/audio_readers/audio_reader_to_bytes.py,sha256=37NfwnAbHrPjikd7_hsuKgkyO0rXfWBChaMXCTyvGlE,1800
- sinapsis_data_readers/templates/audio_readers/base_audio_reader.py,sha256=2Khu4FIxWiyDwdXE623lWXk05Z4tudIUiM1vmH7qJuE,2014
+ sinapsis_data_readers/templates/audio_readers/audio_reader_pydub.py,sha256=m3hURsCHVd29cpmlJti1SNC6mtlx9KDe2RqyEvLyw4M,6219
+ sinapsis_data_readers/templates/audio_readers/audio_reader_soundfile.py,sha256=b7kzHMthuBp_1MW7uYFrDtjB0vkpD5qrr36yA6Sd0Lo,5232
+ sinapsis_data_readers/templates/audio_readers/audio_reader_to_bytes.py,sha256=exbMkY17avmwKHnSuXVeUpMEZd3yzQub1BHJPY2WWlM,1774
+ sinapsis_data_readers/templates/audio_readers/base_audio_reader.py,sha256=-3biMclztIxDCwPaQq4kUkcY2BaP13saxWdIQdm-e-E,2284
  sinapsis_data_readers/templates/datasets_readers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- sinapsis_data_readers/templates/datasets_readers/csv_datasets.py,sha256=E5AAtflkj4wnK-mi3AEqBiq4UK6aAnWcupkYfh9ddjc,1101
+ sinapsis_data_readers/templates/datasets_readers/csv_datasets.py,sha256=jn2x8QXpDn-wLoXML24xcfJ29WHT2OhRuFNrU105lOs,1210
  sinapsis_data_readers/templates/datasets_readers/dataset_splitter.py,sha256=6FqN1x6V748Q_ESFfxfaCRJKJQY8cK-gwRHvzuYdVqI,8860
- sinapsis_data_readers/templates/datasets_readers/sklearn_datasets.py,sha256=yQqQFL_mfcRzdPX6Ebq1ZflvMVt5Fpq573_ZhOGWEx8,8325
- sinapsis_data_readers/templates/datasets_readers/sktime_datasets.py,sha256=lkH55Fhc4u6w0FlRW23Z41K_g3iTkxeXTAWrUy3Vb8s,9450
+ sinapsis_data_readers/templates/datasets_readers/sklearn_datasets.py,sha256=6okT60UR4rrQxhEEKhI9drRPLuFpWGYlD47MmDtMlYI,8323
+ sinapsis_data_readers/templates/datasets_readers/sktime_datasets.py,sha256=o5ywBcqiTtd9_La5LaxEMISsgzr-IgvYwhMbvDXSBws,9481
  sinapsis_data_readers/templates/image_readers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- sinapsis_data_readers/templates/image_readers/base_image_folder_data_loader.py,sha256=JpcgU9zgUDKsWgEpk6aAZSC1oLzfssgXdo-HStF00YY,4762
- sinapsis_data_readers/templates/image_readers/coco_dataset_reader.py,sha256=BAMcRJEGJjd1Rh2JhFNv20wPmD_ulYNORFaKh1zwEaA,14563
- sinapsis_data_readers/templates/image_readers/csv_dataset_reader.py,sha256=kCK3VfKCol7UeIiL3GNYKUpkCGcq727is7t_-ju2MbA,4798
- sinapsis_data_readers/templates/image_readers/image_folder_reader_cv2.py,sha256=N699V5yZstNgir28OWQ_l4zCEo4DpeprRHd-kKdOzSo,2338
+ sinapsis_data_readers/templates/image_readers/base_image_folder_data_loader.py,sha256=UM2SmBX7B2RE1OgwRtBn-GZdKmrgZILdN32itHIJKQI,4832
+ sinapsis_data_readers/templates/image_readers/coco_dataset_reader.py,sha256=cFVP50LeRSu9tppy4RYZbAmN775f6AuGUdDTzAHRf3k,14675
+ sinapsis_data_readers/templates/image_readers/csv_dataset_reader.py,sha256=k2eVDSLnX3dDkkq0ILgaB6X_sf0wgPyYbKTFX3rGBxo,4966
+ sinapsis_data_readers/templates/image_readers/image_folder_reader_cv2.py,sha256=XLoYT_5AZvqxaltek_MMUgA0gtJIzyKLL7YpUIvHaIU,2337
  sinapsis_data_readers/templates/image_readers/image_folder_reader_kornia.py,sha256=LxrveCcVjtM7d3-tely03sfmATfSXOm0Smzdm7EugZI,2466
  sinapsis_data_readers/templates/text_readers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sinapsis_data_readers/templates/text_readers/text_input.py,sha256=d3NXeErhNqY9dYjm7LGXq1C1hRxlBat7daC2FHc6Ltc,2147
  sinapsis_data_readers/templates/video_readers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- sinapsis_data_readers/templates/video_readers/base_video_reader.py,sha256=jErIl2I9Xr2XBgbx-zJxi6Xzh4L1kZ7waIu97oceUnI,14245
- sinapsis_data_readers/templates/video_readers/video_reader_cv2.py,sha256=3jsSyKtkM0fOBWkjCMzLr69lZ6wE9gnHKlbDHoH3ha8,3957
- sinapsis_data_readers/templates/video_readers/video_reader_dali.py,sha256=BXvczZPjfwuJRo7p6mAJ5NQ5LB8iruHvG6EfZ8r78bk,8696
- sinapsis_data_readers/templates/video_readers/video_reader_ffmpeg.py,sha256=SOrb6UR634M2qacFWuN2TKxm7roFR9-bABBWWAerTz8,4892
- sinapsis_data_readers/templates/video_readers/video_reader_torchcodec.py,sha256=4Rs7Zpra8pVxfrqfW0-Pyo8kUHav2nJ7O6tsjG8C1jA,3950
- sinapsis_data_readers-0.1.14.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
- sinapsis_data_readers-0.1.14.dist-info/METADATA,sha256=iXAVJKuGBiNY7ByNQoptzNC0H1FiaU_VgRVOIGPP3Y0,6520
- sinapsis_data_readers-0.1.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- sinapsis_data_readers-0.1.14.dist-info/top_level.txt,sha256=3R3oDiABqDVBW2Fc-SpWXYHkRdYFGZjT_wo6Q0Uqnhw,41
- sinapsis_data_readers-0.1.14.dist-info/RECORD,,
+ sinapsis_data_readers/templates/video_readers/base_video_reader.py,sha256=QhW_VDzqA5YCDKdOxqzFFyEW8-LC1XAdeRGuQYWvMlw,14422
+ sinapsis_data_readers/templates/video_readers/video_reader_cv2.py,sha256=n3EYBYgKNC2zua7IHF6KCcIj41Mqpjp5sFRZLuPNeUs,4037
+ sinapsis_data_readers/templates/video_readers/video_reader_dali.py,sha256=0nmHGPLCC4DXolwLpYdL5vExms3jOGyY_54HGPUXWHM,8815
+ sinapsis_data_readers/templates/video_readers/video_reader_ffmpeg.py,sha256=uAnV02i9gy7p9mxTVcx20F6ily4JhBtJSDJ93Reyi4w,5046
+ sinapsis_data_readers/templates/video_readers/video_reader_torchcodec.py,sha256=wMCjRCaMknDYPOsspuf7NVEhy49h818lVq__AHTsFA8,4036
+ sinapsis_data_readers-0.1.17.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
+ sinapsis_data_readers-0.1.17.dist-info/METADATA,sha256=t0IqjDi-4w0jWeXY3Gz7xmfOmq_ycSuYxrVWFQ2_i_8,6520
+ sinapsis_data_readers-0.1.17.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ sinapsis_data_readers-0.1.17.dist-info/top_level.txt,sha256=3R3oDiABqDVBW2Fc-SpWXYHkRdYFGZjT_wo6Q0Uqnhw,41
+ sinapsis_data_readers-0.1.17.dist-info/RECORD,,
sinapsis_data_readers/helpers/image_color_space_converter.py

@@ -1,53 +0,0 @@
- # -*- coding: utf-8 -*-
- from enum import Enum
-
- import cv2
- from sinapsis_core.data_containers.data_packet import ImageColor, ImagePacket
- from sinapsis_core.utils.logging_utils import sinapsis_logger
-
- color_mapping = {
- (ImageColor.RGB, ImageColor.BGR): cv2.COLOR_RGB2BGR,
- (ImageColor.RGB, ImageColor.GRAY): cv2.COLOR_RGB2GRAY,
- (ImageColor.RGB, ImageColor.RGBA): cv2.COLOR_RGB2RGBA,
- (ImageColor.BGR, ImageColor.RGB): cv2.COLOR_BGR2RGB,
- (ImageColor.BGR, ImageColor.GRAY): cv2.COLOR_BGR2GRAY,
- (ImageColor.BGR, ImageColor.RGBA): cv2.COLOR_BGR2RGBA,
- (ImageColor.GRAY, ImageColor.RGB): cv2.COLOR_GRAY2RGB,
- (ImageColor.GRAY, ImageColor.BGR): cv2.COLOR_GRAY2BGR,
- (ImageColor.GRAY, ImageColor.RGBA): cv2.COLOR_GRAY2RGBA,
- (ImageColor.RGBA, ImageColor.RGB): cv2.COLOR_RGBA2RGB,
- (ImageColor.RGBA, ImageColor.BGR): cv2.COLOR_RGBA2BGR,
- (ImageColor.RGBA, ImageColor.GRAY): cv2.COLOR_RGBA2GRAY,
- }
-
-
- def convert_color_space_cv(image: ImagePacket, desired_color_space: Enum) -> ImagePacket:
- """Converts an image from one color space to another, provided
- they are in the color mapping options.
-
- Args:
- image (ImagePacket): Image packet to apply the conversion
- desired_color_space (Enum): Color space to convert the image
-
- Returns:
- ImagePacket: Updated ImagePacket with content converted into the new color space
-
- Raises:
- ValueError: If the conversion is not possible, return an error.
-
- """
- current_color_space = image.color_space
-
- if (current_color_space, desired_color_space) in color_mapping:
- conversion_code = color_mapping[(current_color_space, desired_color_space)]
- try:
- image.content = cv2.cvtColor(image.content, conversion_code)
- image.color_space = desired_color_space
-
- except cv2.error:
- sinapsis_logger.error(f"Invalid conversion between {current_color_space} and {desired_color_space}")
-
- else:
- raise ValueError(f"Conversion from {current_color_space} to {desired_color_space} is not supported.")
-
- return image