sciveo 0.1.36__tar.gz → 0.1.38__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. {sciveo-0.1.36 → sciveo-0.1.38}/PKG-INFO +1 -1
  2. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/base.py +3 -3
  3. sciveo-0.1.38/sciveo/media/pipelines/processors/image/object_detection.py +81 -0
  4. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/images/embeddings.py +3 -1
  5. {sciveo-0.1.36/sciveo/media/pipelines/processors/image → sciveo-0.1.38/sciveo/ml/images}/object_detection.py +103 -48
  6. sciveo-0.1.36/sciveo/ml/images/transforms.py → sciveo-0.1.38/sciveo/ml/images/transformers.py +85 -29
  7. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/nlp/embeddings.py +3 -1
  8. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/logger.py +1 -1
  9. sciveo-0.1.38/sciveo/version.py +2 -0
  10. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo.egg-info/PKG-INFO +1 -1
  11. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo.egg-info/SOURCES.txt +1 -1
  12. sciveo-0.1.36/sciveo/ml/images/object_detection.py +0 -90
  13. sciveo-0.1.36/sciveo/version.py +0 -2
  14. {sciveo-0.1.36 → sciveo-0.1.38}/README.md +0 -0
  15. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/__init__.py +0 -0
  16. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/api/__init__.py +0 -0
  17. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/api/base.py +0 -0
  18. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/api/upload.py +0 -0
  19. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/cli.py +0 -0
  20. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/common/__init__.py +0 -0
  21. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/common/configuration.py +0 -0
  22. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/common/model.py +0 -0
  23. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/common/optimizers.py +0 -0
  24. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/common/sampling.py +0 -0
  25. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/content/__init__.py +0 -0
  26. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/content/dataset.py +0 -0
  27. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/content/experiment.py +0 -0
  28. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/content/project.py +0 -0
  29. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/content/runner.py +0 -0
  30. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/__init__.py +0 -0
  31. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/__init__.py +0 -0
  32. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/base.py +0 -0
  33. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/encoders/__init__.py +0 -0
  34. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/encoders/base.py +0 -0
  35. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/encoders/normalizer.py +0 -0
  36. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/nlp/__init__.py +0 -0
  37. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/nlp/search.py +0 -0
  38. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/time_series/__init__.py +0 -0
  39. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/time_series/dataset.py +0 -0
  40. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/time_series/predictor.py +0 -0
  41. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/time_series/trainer.py +0 -0
  42. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/ml/time_series/window_generator.py +0 -0
  43. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/__init__.py +0 -0
  44. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/job_daemon.py +0 -0
  45. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/layouts/__init__.py +0 -0
  46. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/layouts/base.py +0 -0
  47. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/pipeline.py +0 -0
  48. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/postprocessors/__init__.py +0 -0
  49. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/postprocessors/base.py +0 -0
  50. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/postprocessors/default.py +0 -0
  51. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/__init__.py +0 -0
  52. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/audio/__init__.py +0 -0
  53. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/audio/audio.py +0 -0
  54. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/audio/audio_extractor_process.py +0 -0
  55. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/aws.py +0 -0
  56. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/base.py +0 -0
  57. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/file/__init__.py +0 -0
  58. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/file/archive.py +0 -0
  59. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/__init__.py +0 -0
  60. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/album.py +0 -0
  61. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/album_in_image.py +0 -0
  62. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/depth_esimation.py +0 -0
  63. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/embeddings.py +0 -0
  64. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/filters.py +0 -0
  65. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/generators.py +0 -0
  66. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/histogram.py +0 -0
  67. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/mask.py +0 -0
  68. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/resize.py +0 -0
  69. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/segmentation.py +0 -0
  70. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/image/watermark.py +0 -0
  71. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/media_info.py +0 -0
  72. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/nlp/__init__.py +0 -0
  73. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/nlp/address.py +0 -0
  74. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/qr.py +0 -0
  75. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/sci/__init__.py +0 -0
  76. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/sci/base.py +0 -0
  77. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/sci/dataset.py +0 -0
  78. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/sci/time_series/__init__.py +0 -0
  79. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/sci/time_series/predictor.py +0 -0
  80. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/sci/time_series/trainer.py +0 -0
  81. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/tpu_base.py +0 -0
  82. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/video/__init__.py +0 -0
  83. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/video/generators.py +0 -0
  84. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/video/motion_detection.py +0 -0
  85. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/video/resize.py +0 -0
  86. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/video/video_album.py +0 -0
  87. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/video/video_frames.py +0 -0
  88. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/processors/video/video_resample.py +0 -0
  89. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/queues.py +0 -0
  90. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/server.py +0 -0
  91. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/web/__init__.py +0 -0
  92. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/web/server.py +0 -0
  93. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/__init__.py +0 -0
  94. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/base.py +0 -0
  95. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/evaluation/__init__.py +0 -0
  96. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/evaluation/object_detection.py +0 -0
  97. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/images/__init__.py +0 -0
  98. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/images/base.py +0 -0
  99. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/images/description.py +0 -0
  100. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/images/tools.py +0 -0
  101. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/nlp/__init__.py +0 -0
  102. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/video/__init__.py +0 -0
  103. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/video/description.py +0 -0
  104. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/monitoring/__init__.py +0 -0
  105. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/monitoring/monitor.py +0 -0
  106. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/monitoring/start.py +0 -0
  107. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/network/__init__.py +0 -0
  108. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/network/camera.py +0 -0
  109. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/network/sniffer.py +0 -0
  110. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/network/tools.py +0 -0
  111. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/__init__.py +0 -0
  112. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/array.py +0 -0
  113. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/aws/__init__.py +0 -0
  114. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/aws/priority_queue.py +0 -0
  115. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/aws/s3.py +0 -0
  116. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/common.py +0 -0
  117. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/compress.py +0 -0
  118. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/configuration.py +0 -0
  119. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/crypto.py +0 -0
  120. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/daemon.py +0 -0
  121. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/formating.py +0 -0
  122. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/hardware.py +0 -0
  123. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/http.py +0 -0
  124. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/os.py +0 -0
  125. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/random.py +0 -0
  126. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/remote.py +0 -0
  127. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/simple_counter.py +0 -0
  128. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/synchronized.py +0 -0
  129. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/timers.py +0 -0
  130. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo.egg-info/dependency_links.txt +0 -0
  131. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo.egg-info/entry_points.txt +0 -0
  132. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo.egg-info/requires.txt +0 -0
  133. {sciveo-0.1.36 → sciveo-0.1.38}/sciveo.egg-info/top_level.txt +0 -0
  134. {sciveo-0.1.36 → sciveo-0.1.38}/setup.cfg +0 -0
  135. {sciveo-0.1.36 → sciveo-0.1.38}/setup.py +0 -0
  136. {sciveo-0.1.36 → sciveo-0.1.38}/test/test_compress.py +0 -0
  137. {sciveo-0.1.36 → sciveo-0.1.38}/test/test_configuration.py +0 -0
  138. {sciveo-0.1.36 → sciveo-0.1.38}/test/test_crypto.py +0 -0
  139. {sciveo-0.1.36 → sciveo-0.1.38}/test/test_monitoring.py +0 -0
  140. {sciveo-0.1.36 → sciveo-0.1.38}/test/test_runner.py +0 -0
  141. {sciveo-0.1.36 → sciveo-0.1.38}/test/test_sampling.py +0 -0
{sciveo-0.1.36 → sciveo-0.1.38}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sciveo
-Version: 0.1.36
+Version: 0.1.38
 Description-Content-Type: text/markdown
 Provides-Extra: mon
 Provides-Extra: net
{sciveo-0.1.36 → sciveo-0.1.38}/sciveo/media/pipelines/base.py

@@ -35,7 +35,7 @@ class BaseContentProcessor:
       k = k.replace(".MOV", e)
     return k

-  def next_content(self, media, tag=None, local_path=None, content_type=None, key=None, w=100, h=100, name=""):
+  def next_content(self, media, tag=None, local_path=None, content_type=None, key=None, w=100, h=100, name="", args={}):
     if content_type is None:
       content_type = media['content_type']
     if key is None:

@@ -53,7 +53,7 @@ class BaseContentProcessor:

     media.setdefault("next", [])

-    media["next"].append({
+    media["next"].append(dict({
       "guid": guid,
       "content_type": content_type,
       "name": name,

@@ -67,7 +67,7 @@ class BaseContentProcessor:
       "processor": self.name(),
       "processors": media["processors"],
       "layout": {"name": self.name(), **self["layout"]}
-    })
+    }, **args))

   def content_type(self):
     return "media"
sciveo-0.1.38/sciveo/media/pipelines/processors/image/object_detection.py (new file)

@@ -0,0 +1,81 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import numpy as np
+import cv2
+
+from sciveo.tools.logger import *
+from sciveo.tools.simple_counter import Timer
+from sciveo.tools.common import *
+from sciveo.ml.images.object_detection import ObjectDetectorYOLO
+from sciveo.media.pipelines.processors.base import *
+
+
+class ImageObjectDetectionProcessor(BaseProcessor):
+  def __init__(self, processor_config, max_progress) -> None:
+    super().__init__(processor_config, max_progress)
+    self.default.update({"JPEG_QUALITY": 80, "min_confidence": 0.5, "model_type": 0, "height": 720})
+
+  def init_run(self):
+    self.predictor = ObjectDetectorYOLO(model_path=[
+      "yolo11x.pt", "yolo11l.pt", "yolo11m.pt", "yolo11s.pt", "yolo11n.pt",
+      'softel-surveillance-yolo11X.pt', 'softel-surveillance-yolo11L.pt', 'softel-surveillance-yolo11M.pt',
+      'softel-surveillance-yolo11S.pt', 'softel-surveillance-yolo11N.pt',
+    ][self["model_type"]])
+
+  def process(self, media):
+    try:
+      self.media = media
+      self.local_path = media["local_path"]
+
+      tag = "object-detections"
+      image = self.predictor.load(self.local_path)
+
+      image_resized = self.predictor.resize(image)
+      detections = self.predictor.predict_one([image_resized], confidence_threshold=self["min_confidence"])
+
+      image_resized = self.predictor.resize(image, h=self["height"])
+      image_resized = cv2.cvtColor(image_resized, cv2.COLOR_RGB2BGR)
+      self.predictor.draw(image_resized, detections[0])
+      result_image_local_path = self.add_suffix_to_filename(self.local_path, tag)
+      cv2.imwrite(result_image_local_path, image_resized, [cv2.IMWRITE_JPEG_QUALITY, self["JPEG_QUALITY"]])
+
+      self.next_content(self.media, tag, result_image_local_path, w=image_resized.shape[1], h=image_resized.shape[0])
+      self.next_comment(detections[0])
+    except Exception as e:
+      exception(e, self.media)
+    return self.media
+
+  def next_comment(self, detections):
+    boxes = detections.boxes
+    class_names = detections.names
+    detections_json = {}
+    for i, box in enumerate(boxes):
+      confidence = box.conf[0].item()
+      class_id = int(box.cls[0].item())
+      label = class_names[class_id]
+      detections_json.setdefault(label, 0)
+      detections_json[label] += 1
+
+    detections_str = ""
+    for k, v in detections_json.items():
+      detections_str += f"{v} {k}\n"
+
+    self.next_content(self.media, tag="OD", content_type="comment", name=f"OD {self.predictor.model_name}", args={
+      "description": detections_str,
+      "content_text": f"{detections_str}\n\n{str(detections_json)}"
+    })
+
+  def content_type(self):
+    return "image"
+
+  def name(self):
+    return "image-object-detection"
{sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/images/embeddings.py

@@ -95,6 +95,8 @@ class ImageEmbedding(BaseImageML):
     self.init()
     predictions = []
     for current_x in X:
-      embedding = self.predict_one(current_x).tolist()
+      embedding = self.predict_one(current_x)
+      if not isinstance(embedding, list):
+        embedding = embedding.tolist()
       predictions.append(embedding)
     return predictions
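The guard presumably covers predictors whose `predict_one()` already returns a plain Python list (which has no `.tolist()`), while ndarray/tensor results still get converted; the same change is applied to `TextEmbedding` further down. A tiny sketch of the normalization in isolation:

```python
import numpy as np

def normalize_embedding(embedding):
  # Plain lists pass through; ndarrays/tensors are converted via .tolist().
  if not isinstance(embedding, list):
    embedding = embedding.tolist()
  return embedding

print(normalize_embedding(np.array([0.1, 0.2])))  # [0.1, 0.2]
print(normalize_embedding([0.1, 0.2]))            # unchanged
```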
{sciveo-0.1.36/sciveo/media/pipelines/processors/image → sciveo-0.1.38/sciveo/ml/images}/object_detection.py

@@ -9,19 +9,103 @@
 # 2024
 #

+import os
+import boto3
+import math
 import numpy as np
 import cv2

+from sciveo.ml.images.tools import *
+from sciveo.ml.base import BaseML
 from sciveo.tools.logger import *
-from sciveo.tools.simple_counter import Timer
-from sciveo.tools.common import *
-from sciveo.media.pipelines.processors.base import *


-class ObjectDetectorBase:
-  def __init__(self, model_path, device='cpu', colors=None):
+"""
+Object Detection Bounding Boxes (bbox) of type [x, y, w, h]
+
+If [x1, y1, x2, y2] is needed, use the inverted convertor bbox_convert_inverted().
+
+IoU between 2 object detections: iou(bbox1, bbox2)
+
+"""
+
+# convert from [x, y, w, h] -> [x1, y1, x2, y2]
+def bbox_convert(bbox):
+  x1 = bbox[0]
+  y1 = bbox[1]
+  x2 = x1 + bbox[2]
+  y2 = y1 + bbox[3]
+  return [x1, y1, x2, y2]
+
+# convert from [x1, y1, x2, y2] -> [x, y, w, h]
+def bbox_convert_inverted(bbox):
+  x = bbox[0]
+  y = bbox[1]
+  w = bbox[2] - x
+  h = bbox[3] - y
+  return [x, y, w, h]
+
+def bbox_norm(bbox, w, h):
+  return (bbox[0] / w, bbox[1] / h, bbox[2] / w, bbox[3] / h)
+
+def bbox_denorm(bbox, w, h):
+  return (int(bbox[0] * w), int(bbox[1] * h), int(bbox[2] * w), int(bbox[3] * h))
+
+def bbox_center(bbox):
+  return (int(bbox[0] + bbox[2] / 2), int(bbox[1] + bbox[3] / 2))
+
+def bbox_area(bbox):
+  return bbox[2] * bbox[3]
+
+def iou(bbox1, bbox2):
+  x1 = max(bbox1[0], bbox2[0])
+  y1 = max(bbox1[1], bbox2[1])
+  x2 = min(bbox1[0] + bbox1[2], bbox2[0] + bbox2[2])
+  y2 = min(bbox1[1] + bbox1[3], bbox2[1] + bbox2[3])
+
+  if x1 < x2 and y1 < y2:
+    a = (x2 - x1) * (y2 - y1)
+  else:
+    a = 0
+
+  a1 = bbox_area(bbox1)
+  a2 = bbox_area(bbox2)
+  return a / (a1 + a2 - a)
+
+def bbox_distance(bbox1, bbox2):
+  return points_distance(bbox_center(bbox1), bbox_center(bbox2))
+
+
+"""
+
+Simple draw object detections helpers
+
+"""
+def image_shape(image):
+  return image.shape[1], image.shape[0]
+
+# Draw label bounding boxes of type [x, y, w, h]; if [x1, y1, x2, y2], set convert=False
+def draw_label_bboxes(image, bboxes, color, convert=True):
+  w, h = image_shape(image)
+  for bbox in bboxes:
+    if convert:
+      bbox = bbox_convert(bbox)
+    bbox = bbox_denorm(bbox, w, h)
+    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2, 1)
+  return image
+
+
+"""
+
+Object Detectors
+
+"""
+
+class ObjectDetectorBase(BaseML):
+  def __init__(self, model_path, cache_dir=None, device='cpu', colors=None):
+    super().__init__(model_path, cache_dir=cache_dir, device=device)
     self.model_path = model_path
-    self.device = device
+
     if colors is None:
       self.colors = [
         (60, 180, 75), # Green
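The helpers moved in above operate on `[x, y, w, h]` boxes. A quick worked example of `iou()` as defined in this hunk, assuming the function is imported from its new module location:

```python
from sciveo.ml.images.object_detection import iou  # new location as of 0.1.38

bbox1 = [0, 0, 10, 10]
bbox2 = [5, 5, 10, 10]

# Intersection is the 5x5 overlap -> area 25; union = 100 + 100 - 25 = 175.
print(iou(bbox1, bbox2))  # 25 / 175 ≈ 0.1429
```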
@@ -99,9 +183,18 @@ class ObjectDetectorBase:


 class ObjectDetectorYOLO(ObjectDetectorBase):
-  def __init__(self, model_path="yolo11m.pt", device='cpu', colors=None):
-    super().__init__(model_path, device=device, colors=colors)
+  def __init__(self, model_path="yolo11m.pt", cache_dir=None, device='cpu', colors=None):
+    super().__init__(model_path, cache_dir=cache_dir, device=device, colors=colors)
     from ultralytics import YOLO
+    if self.model_name.startswith("softel"):
+      self.model_path = os.path.join(self.cache_dir, self.model_name.replace("/", "---"))
+      if os.path.isfile(self.model_path):
+        debug(self.model_name, "available", self.model_path)
+      else:
+        debug("DWN", self.model_name)
+        s3 = boto3.client('s3')
+        s3.download_file("sciveo-model", self.model_name, self.model_path)
+
     self.model = YOLO(self.model_path)

   def predict_one(self, x, confidence_threshold=0.5):
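A hedged usage sketch of the new constructor path: stock `yolo11*.pt` weights resolve through ultralytics as before, while names starting with "softel" are looked up in the local cache and, if missing, downloaded from the `sciveo-model` S3 bucket, so boto3-visible AWS credentials are required in that case. Note that `model_name` and `cache_dir` come from `BaseML`, which is not shown in this diff:

```python
from sciveo.ml.images.object_detection import ObjectDetectorYOLO

# Stock ultralytics weights, resolved by YOLO itself.
detector = ObjectDetectorYOLO(model_path="yolo11m.pt", device="cpu")

# Custom weights: served from the cache dir if present, otherwise fetched
# from the "sciveo-model" S3 bucket via boto3 (needs AWS credentials).
detector = ObjectDetectorYOLO(model_path="softel-surveillance-yolo11M.pt",
                              cache_dir="/tmp/models")  # hypothetical cache dir
```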
@@ -127,8 +220,8 @@ class ObjectDetectorYOLO(ObjectDetectorBase):

     return predictions

-  def resize(self, image):
-    return super().resize(image, 640)
+  def resize(self, image, h=640):
+    return super().resize(image, h)

   def draw(self, image, detections, colors=None):
     if colors is None:

@@ -146,41 +239,3 @@ class ObjectDetectorYOLO(ObjectDetectorBase):

       color = colors[i % len(colors)]
       self.draw_object_rectangle_xyxy(image, bbox, label_text, color)
-
-
-class ImageObjectDetectionProcessor(BaseProcessor):
-  def __init__(self, processor_config, max_progress) -> None:
-    super().__init__(processor_config, max_progress)
-    self.default.update({"JPEG_QUALITY": 80, "min_confidence": 0.5, "model_type": 0, "height": 720})
-
-  def init_run(self):
-    TPU = os.environ.get("MEDIA_PROCESSING_BACKEND", "cpu")
-    self.predictor = ObjectDetectorYOLO(model_path=["yolo11x.pt", "yolo11l.pt", "yolo11m.pt", "yolo11s.pt"][self["model_type"]], device=TPU)
-
-  def process(self, media):
-    try:
-      self.media = media
-      self.local_path = media["local_path"]
-
-      tag = "object-detections"
-      image = self.predictor.load(self.local_path)
-      image_resized = self.predictor.resize(image)
-
-      detections = self.predictor.predict_one([image_resized], confidence_threshold=self["min_confidence"])
-
-      # image_resized = self.predictor.super().resize(image, h=self["height"])
-      image_resized = cv2.cvtColor(image_resized, cv2.COLOR_RGB2BGR)
-      self.predictor.draw(image_resized, detections[0])
-      result_image_local_path = self.add_suffix_to_filename(self.local_path, tag)
-      cv2.imwrite(result_image_local_path, image_resized, [cv2.IMWRITE_JPEG_QUALITY, self["JPEG_QUALITY"]])
-
-      self.next_content(self.media, tag, result_image_local_path, w=image_resized.shape[1], h=image_resized.shape[0])
-    except Exception as e:
-      exception(e, self.media)
-    return self.media
-
-  def content_type(self):
-    return "image"
-
-  def name(self):
-    return "image-object-detection"
sciveo-0.1.36/sciveo/ml/images/transforms.py → sciveo-0.1.38/sciveo/ml/images/transformers.py

@@ -17,11 +17,11 @@ class BaseImageTransformer:
   def __init__(self, param=0):
     self.param = param

-  def result(self, image, labels):
-    if labels is None:
-      return image
-    else:
-      return image, labels
+  def __call__(self, image, labels=None):
+    return image, labels
+
+  def transform(self, image, labels=None):
+    return self(image, labels)


 class RandomTransformer(BaseImageTransformer):
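After this refactor every transformer returns a uniform `(image, labels)` tuple (the old `result()` returned a bare image when `labels` was `None`), so transformers compose without shape special-casing. A sketch of a custom transformer against the new protocol; the `Invert` class is illustrative, not part of the package:

```python
import numpy as np
from sciveo.ml.images.transformers import BaseImageTransformer

class Invert(BaseImageTransformer):
  def __call__(self, image, labels=None):
    image = 255 - image   # invert a uint8 image
    return image, labels  # always the uniform (image, labels) tuple

image = np.zeros((4, 4, 3), dtype=np.uint8)
image, labels = Invert()(image)  # labels stays None when unused
```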
@@ -38,7 +38,7 @@ class RandomTransformer(BaseImageTransformer):
       self.transformer.param = np.random.uniform(self.lower, self.upper)
       return self.transformer(image, labels)
     else:
-      return self.result(image, labels)
+      return image, labels


 class ConvertColor(BaseImageTransformer):

@@ -61,20 +61,19 @@ class ConvertColor(BaseImageTransformer):
       image = cv2.cvtColor(image, cv2.COLOR_HSV2GRAY)
     if self.keep_3ch:
       image = np.stack([image] * 3, axis=-1)
-    return self.result(image, labels)
+    return image, labels


 class ConvertDataType(BaseImageTransformer):
-  def __init__(self, to='uint8'):
-    if not (to == 'uint8' or to == 'float32'): raise ValueError("uint8 or float32 only")
+  def __init__(self, to=np.uint8):
     self.to = to

   def __call__(self, image, labels=None):
-    if self.to == 'uint8':
-      image = np.round(image, decimals=0).astype(np.uint8)
+    if self.to == np.uint8:
+      image = np.round(image, decimals=0).astype(self.to)
     else:
       image = image.astype(np.float32)
-    return self.result(image, labels)
+    return image, labels

@@ -89,27 +88,29 @@ class ConvertTo3Channels(BaseImageTransformer):
       image = np.concatenate([image] * 3, axis=-1)
     elif image.shape[2] == 4:
       image = image[:,:,:3]
-    return self.result(image, labels)
+    return image, labels


 class Hue(BaseImageTransformer):
   def __init__(self, delta):
-    if not (-180 <= delta <= 180): raise ValueError("delta shoulbe in [-180, 180]")
+    if not (-180 <= delta <= 180):
+      delta = 0
     super().__init__(delta)

   def __call__(self, image, labels=None):
     image[:, :, 0] = (image[:, :, 0] + self.param) % 180.0
-    return self.result(image, labels)
+    return image, labels


 class Saturation(BaseImageTransformer):
   def __init__(self, factor):
-    if factor <= 0.0: raise ValueError("It must be `factor > 0`.")
+    if factor <= 0.0:
+      factor = 1e-10
     super().__init__(factor)

   def __call__(self, image, labels=None):
     image[:,:,1] = np.clip(image[:,:,1] * self.param, 0, 255)
-    return self.result(image, labels)
+    return image, labels


 class Brightness(BaseImageTransformer):

@@ -118,7 +119,7 @@ class Brightness(BaseImageTransformer):

   def __call__(self, image, labels=None):
     image = np.clip(image + self.param, 0, 255)
-    return self.result(image, labels)
+    return image, labels


 class Contrast(BaseImageTransformer):

@@ -128,7 +129,7 @@ class Contrast(BaseImageTransformer):

   def __call__(self, image, labels=None):
     image = np.clip(127.5 + self.param * (image - 127.5), 0, 255)
-    return self.result(image, labels)
+    return image, labels


 class Gamma(BaseImageTransformer):

@@ -140,7 +141,7 @@ class Gamma(BaseImageTransformer):

   def __call__(self, image, labels=None):
     image = cv2.LUT(image, self.lut)
-    return self.result(image, labels)
+    return image, labels


 class RandomGamma(BaseImageTransformer):

@@ -157,7 +158,7 @@ class RandomGamma(BaseImageTransformer):
       change_gamma = Gamma(gamma=gamma)
       return change_gamma(image, labels)
     else:
-      return self.result(image, labels)
+      return image, labels


 class HistogramEqualization(BaseImageTransformer):

@@ -166,7 +167,7 @@ class HistogramEqualization(BaseImageTransformer):

   def __call__(self, image, labels=None):
     image[:,:,2] = cv2.equalizeHist(image[:,:,2])
-    return self.result(image, labels)
+    return image, labels


 class RandomHistogramEqualization(BaseImageTransformer):

@@ -179,7 +180,7 @@ class RandomHistogramEqualization(BaseImageTransformer):
     if p >= (1.0-self.prob):
       return self.equalize(image, labels)
     else:
-      return self.result(image, labels)
+      return image, labels


 class ChannelSwap(BaseImageTransformer):

@@ -188,7 +189,7 @@ class ChannelSwap(BaseImageTransformer):

   def __call__(self, image, labels=None):
     image = image[:,:,self.order]
-    return self.result(image, labels)
+    return image, labels


 class RandomChannelSwap(BaseImageTransformer):

@@ -204,7 +205,7 @@ class RandomChannelSwap(BaseImageTransformer):
       self.swap_channels.order = self.permutations[i]
       return self.swap_channels(image, labels)
     else:
-      return self.result(image, labels)
+      return image, labels


 class BlackWhite(BaseImageTransformer):

@@ -213,7 +214,7 @@ class BlackWhite(BaseImageTransformer):

   def __call__(self, image, labels=None):
     image = np.transpose(np.tile(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (3, 1, 1)), [1, 2, 0])
-    return self.result(image, labels)
+    return image, labels


 class GaussianNoise(BaseImageTransformer):

@@ -235,7 +236,7 @@ class GaussianNoise(BaseImageTransformer):
     image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, dtype=-1)
     image = image.astype(np.uint8)

-    return self.result(image, labels)
+    return image, labels


 class Pixelisator(BaseImageTransformer):

@@ -248,7 +249,7 @@ class Pixelisator(BaseImageTransformer):
     k = 1.0 / self.param
     image = cv2.resize(image, (int(w * k), int(h * k)))
     image = cv2.resize(image, (w, h))
-    return self.result(image, labels)
+    return image, labels


 class Blur(BaseImageTransformer):
@@ -257,4 +258,59 @@ class Blur(BaseImageTransformer):

   def __call__(self, image, labels=None):
     image = cv2.blur(image, (int(self.param), int(self.param)))
-    return self.result(image, labels)
+    return image, labels
+
+
+class PhotometricTransformer:
+  def __init__(self):
+    self.sequences = [
+      [
+        ConvertTo3Channels(),
+        RandomTransformer(Brightness, lower=-32, upper=32, proba=0.5),
+        ConvertDataType(to=np.float32),
+        RandomTransformer(Contrast, lower=0.5, upper=1.5, proba=0.5),
+        ConvertDataType(to=np.uint8),
+        RandomTransformer(BlackWhite, lower=0, upper=0, proba=0.2),
+        RandomTransformer(GaussianNoise, lower=32, upper=256, proba=0.2),
+        RandomTransformer(Pixelisator, lower=1.0, upper=3.0, proba=0.2),
+        RandomTransformer(Blur, lower=2.0, upper=10.0, proba=0.2),
+        ConvertColor(current='RGB', to='HSV'),
+        ConvertDataType(to=np.float32),
+        RandomTransformer(Saturation, lower=0.5, upper=1.5, proba=0.5),
+        RandomTransformer(Hue, lower=-18, upper=18, proba=0.5),
+        ConvertDataType(to=np.uint8),
+        ConvertColor(current='HSV', to='RGB'),
+        RandomChannelSwap(prob=0.0)
+      ],
+      [
+        ConvertTo3Channels(),
+        ConvertDataType(to=np.uint8),
+        RandomTransformer(Brightness, lower=-64, upper=64, proba=0.5),
+        RandomTransformer(BlackWhite, lower=0, upper=0, proba=0.4),
+        RandomTransformer(GaussianNoise, lower=32, upper=256, proba=0.4),
+        RandomTransformer(Pixelisator, lower=1.0, upper=4.0, proba=0.2),
+        RandomTransformer(Blur, lower=2.0, upper=20.0, proba=0.2),
+        ConvertColor(current='RGB', to='HSV'),
+        ConvertDataType(to=np.float32),
+        RandomTransformer(Saturation, lower=0.5, upper=2.5, proba=0.5),
+        RandomTransformer(Hue, lower=-64, upper=64, proba=0.5),
+        ConvertDataType(to=np.uint8),
+        ConvertColor(current='HSV', to='RGB'),
+        ConvertDataType(to=np.float32),
+        RandomTransformer(Contrast, lower=0.5, upper=2.5, proba=0.5),
+        ConvertDataType(to=np.uint8),
+        RandomChannelSwap(prob=0.0)
+      ]
+    ]
+
+  def load_image(self, image_path):
+    return cv2.imread(image_path)
+
+  def __call__(self, image, labels=None):
+    if isinstance(image, str):
+      image = self.load_image(image)
+
+    idx = np.random.choice(len(self.sequences))
+    for transformer in self.sequences[idx]:
+      image, labels = transformer(image, labels)
+    return image, labels
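A short usage sketch of the new `PhotometricTransformer`: each call picks one of its two augmentation sequences at random and threads the `(image, labels)` pair through every step. The image path below is a placeholder:

```python
from sciveo.ml.images.transformers import PhotometricTransformer

augment = PhotometricTransformer()

# Accepts either an already-loaded image or a path (loaded via cv2.imread).
image, labels = augment("/path/to/image.jpg")  # placeholder path

# Photometric ops leave labels untouched, so bboxes pass through unchanged.
image, labels = augment(image, labels=[[0.1, 0.2, 0.3, 0.4]])
```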
{sciveo-0.1.36 → sciveo-0.1.38}/sciveo/ml/nlp/embeddings.py

@@ -123,6 +123,8 @@ class TextEmbedding(BaseML):
     self.init()
     predictions = []
     for current_x in X:
-      embedding = self.predict_one(current_x).tolist()
+      embedding = self.predict_one(current_x)
+      if not isinstance(embedding, list):
+        embedding = embedding.tolist()
       predictions.append(embedding)
     return predictions
{sciveo-0.1.36 → sciveo-0.1.38}/sciveo/tools/logger.py

@@ -31,7 +31,7 @@ def _sciveo_get_logger(name):
   log_min_level = logging.getLevelName(_sciveo_log_min_level)
   if (isinstance(log_min_level, str) and log_min_level.startswith("Level")):
     log_min_level = "DEBUG"
-  if isinstance(log_min_level, int) and log_min_level < 10:
+  elif isinstance(log_min_level, int) and log_min_level < 10:
     log_min_level = "DEBUG"
   logger.setLevel(log_min_level)

sciveo-0.1.38/sciveo/version.py (new file)

@@ -0,0 +1,2 @@
+
+__version__ = '0.1.38'
{sciveo-0.1.36 → sciveo-0.1.38}/sciveo.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sciveo
-Version: 0.1.36
+Version: 0.1.38
 Description-Content-Type: text/markdown
 Provides-Extra: mon
 Provides-Extra: net
{sciveo-0.1.36 → sciveo-0.1.38}/sciveo.egg-info/SOURCES.txt

@@ -97,7 +97,7 @@ sciveo/ml/images/description.py
 sciveo/ml/images/embeddings.py
 sciveo/ml/images/object_detection.py
 sciveo/ml/images/tools.py
-sciveo/ml/images/transforms.py
+sciveo/ml/images/transformers.py
 sciveo/ml/nlp/__init__.py
 sciveo/ml/nlp/embeddings.py
 sciveo/ml/video/__init__.py
sciveo-0.1.36/sciveo/ml/images/object_detection.py (deleted)

@@ -1,90 +0,0 @@
-#
-# Pavlin Georgiev, Softel Labs
-#
-# This is a proprietary file and may not be copied,
-# distributed, or modified without express permission
-# from the owner. For licensing inquiries, please
-# contact pavlin@softel.bg.
-#
-# 2024
-#
-
-import math
-import numpy as np
-
-from sciveo.ml.images.tools import *
-
-
-"""
-Object Detection Bounding Boxes (bbox) of type [x, y, w, h]
-
-If need to use [x1, y1, x2, y2] need to use the inverted convertor bbox_convert_inverted().
-
-IoU between 2 object detections: iou(bbox1, bbox2)
-
-"""
-
-# convert from [x, y, w, h] -> [x1, y1, x2, y2]
-def bbox_convert(bbox):
-  x1 = bbox[0]
-  y1 = bbox[1]
-  x2 = x1 + bbox[2]
-  y2 = y1 + bbox[3]
-  return [x1, y1, x2, y2]
-
-# convert from [x1, y1, x2, y2] -> [x, y, w, h]
-def bbox_convert_inverted(bbox):
-  x = bbox[0]
-  y = bbox[1]
-  w = bbox[2] - x
-  h = bbox[3] - y
-  return [x, y, w, h]
-
-def bbox_norm(bbox, w, h):
-  return (bbox[0] / w, bbox[1] / h, bbox[2] / w, bbox[3] / h)
-
-def bbox_denorm(bbox, w, h):
-  return (int(bbox[0] * w), int(bbox[1] * h), int(bbox[2] * w), int(bbox[3] * h))
-
-def bbox_center(bbox):
-  return (int(bbox[0] + bbox[2] / 2), int(bbox[1] + bbox[3] / 2))
-
-def bbox_area(bbox):
-  return bbox[2] * bbox[3]
-
-def iou(bbox1, bbox2):
-  x1 = max(bbox1[0], bbox2[0])
-  y1 = max(bbox1[1], bbox2[1])
-  x2 = min(bbox1[0] + bbox1[2], bbox2[0] + bbox2[2])
-  y2 = min(bbox1[1] + bbox1[3], bbox2[1] + bbox2[3])
-
-  if x1 < x2 and y1 < y2:
-    a = (x2 - x1) * (y2 - y1)
-  else:
-    a = 0
-
-  a1 = bbox_area(bbox1)
-  a2 = bbox_area(bbox2)
-  return a / (a1 + a2 - a)
-
-def bbox_distance(bbox1, bbox2):
-  return points_distance(bbox_center(bbox1), bbox_center(bbox2))
-
-
-"""
-
-Simple Draw object detectios helpers
-
-"""
-def image_shape(image):
-  return image.shape[1], image.shape[0]
-
-# Draw label bounding boxes of type [x, y, w, h], if [x1, y1, x2, y2] then set convert=False
-def draw_label_bboxes(image, bboxes, color, convert=True):
-  w, h = image_shape(image)
-  for bbox in bboxes:
-    if convert:
-      bbox = bbox_convert(bbox)
-    bbox = bbox_denorm(bbox, w, h)
-    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2, 1)
-  return image
sciveo-0.1.36/sciveo/version.py (deleted)

@@ -1,2 +0,0 @@
-
-__version__ = '0.1.36'