sciveo 0.1.26__tar.gz → 0.1.28__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sciveo-0.1.26 → sciveo-0.1.28}/PKG-INFO +1 -1
- {sciveo-0.1.26/sciveo/media/ml → sciveo-0.1.28/sciveo}/__init__.py +2 -2
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/cli.py +15 -4
- sciveo-0.1.28/sciveo/media/ml/nlp/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/ml/time_series/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/layouts/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/postprocessors/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/processors/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/processors/audio/__init__.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/audio/audio.py +4 -4
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/audio/audio_extractor_process.py +1 -16
- sciveo-0.1.28/sciveo/media/pipelines/processors/file/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/processors/image/__init__.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/depth_esimation.py +1 -0
- sciveo-0.1.28/sciveo/media/pipelines/processors/nlp/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/processors/sci/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/processors/sci/time_series/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/processors/video/__init__.py +0 -0
- sciveo-0.1.28/sciveo/media/pipelines/web/__init__.py +0 -0
- sciveo-0.1.28/sciveo/ml/__init__.py +0 -0
- sciveo-0.1.28/sciveo/ml/evaluation/__init__.py +0 -0
- sciveo-0.1.28/sciveo/ml/evaluation/object_detection.py +33 -0
- sciveo-0.1.28/sciveo/ml/images/__init__.py +0 -0
- sciveo-0.1.28/sciveo/ml/images/object_detection.py +90 -0
- sciveo-0.1.28/sciveo/ml/images/tools.py +83 -0
- sciveo-0.1.28/sciveo/ml/images/transforms.py +260 -0
- sciveo-0.1.28/sciveo/ml/nlp/__init__.py +0 -0
- sciveo-0.1.28/sciveo/monitoring/__init__.py +0 -0
- sciveo-0.1.28/sciveo/network/__init__.py +0 -0
- sciveo-0.1.28/sciveo/tools/__init__.py +0 -0
- sciveo-0.1.28/sciveo/tools/aws/__init__.py +0 -0
- sciveo-0.1.28/sciveo/version.py +2 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo.egg-info/PKG-INFO +1 -1
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo.egg-info/SOURCES.txt +8 -0
- sciveo-0.1.28/sciveo.egg-info/requires.txt +79 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/setup.py +6 -4
- sciveo-0.1.26/sciveo/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/ml/encoders/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/ml/nlp/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/ml/time_series/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/layouts/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/postprocessors/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/processors/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/processors/audio/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/processors/file/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/processors/image/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/processors/nlp/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/processors/sci/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/processors/sci/time_series/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/processors/video/__init__.py +0 -56
- sciveo-0.1.26/sciveo/media/pipelines/web/__init__.py +0 -56
- sciveo-0.1.26/sciveo/tools/aws/__init__.py +0 -56
- sciveo-0.1.26/sciveo/version.py +0 -2
- sciveo-0.1.26/sciveo.egg-info/requires.txt +0 -79
- {sciveo-0.1.26 → sciveo-0.1.28}/README.md +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/api/__init__.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/api/base.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/api/upload.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/common/__init__.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/common/configuration.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/common/model.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/common/optimizers.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/common/sampling.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/content/__init__.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/content/dataset.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/content/experiment.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/content/project.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/content/runner.py +0 -0
- {sciveo-0.1.26/sciveo/monitoring → sciveo-0.1.28/sciveo/media}/__init__.py +0 -0
- {sciveo-0.1.26/sciveo/network → sciveo-0.1.28/sciveo/media/ml}/__init__.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/ml/base.py +0 -0
- {sciveo-0.1.26/sciveo/tools → sciveo-0.1.28/sciveo/media/ml/encoders}/__init__.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/ml/encoders/base.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/ml/encoders/normalizer.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/ml/nlp/search.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/ml/time_series/dataset.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/ml/time_series/predictor.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/ml/time_series/trainer.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/ml/time_series/window_generator.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/base.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/job_daemon.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/layouts/base.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/pipeline.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/postprocessors/base.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/postprocessors/default.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/aws.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/base.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/file/archive.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/album.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/album_in_image.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/embeddings.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/filters.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/generators.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/histogram.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/mask.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/resize.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/segmentation.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/image/watermark.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/media_info.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/nlp/address.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/qr.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/sci/base.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/sci/dataset.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/sci/time_series/predictor.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/sci/time_series/trainer.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/tpu_base.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/video/generators.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/video/motion_detection.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/video/resize.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/video/video_album.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/video/video_frames.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/video/video_resample.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/queues.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/server.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/web/server.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/monitoring/monitor.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/monitoring/start.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/network/camera.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/network/sniffer.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/network/tools.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/array.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/aws/priority_queue.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/aws/s3.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/common.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/compress.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/configuration.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/crypto.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/daemon.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/formating.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/hardware.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/http.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/logger.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/os.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/random.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/remote.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/simple_counter.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/synchronized.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo/tools/timers.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo.egg-info/dependency_links.txt +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo.egg-info/entry_points.txt +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/sciveo.egg-info/top_level.txt +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/setup.cfg +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/test/test_compress.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/test/test_configuration.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/test/test_crypto.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/test/test_monitoring.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/test/test_runner.py +0 -0
- {sciveo-0.1.26 → sciveo-0.1.28}/test/test_sampling.py +0 -0
{sciveo-0.1.26/sciveo/media/ml → sciveo-0.1.28/sciveo}/__init__.py
RENAMED
@@ -20,8 +20,6 @@ try:
   from sciveo.tools.daemon import TasksDaemon, __upload_content__
   from sciveo.content.runner import ProjectRunner
   from sciveo.content.dataset import Dataset
-  from sciveo.monitoring.start import MonitorStart
-  from sciveo.network.tools import NetworkTools
   from sciveo.version import __version__


@@ -46,10 +44,12 @@ try:

   # Monitoring start
   def monitor(**kwargs):
+    from sciveo.monitoring.start import MonitorStart
     MonitorStart(**kwargs)()

   # Network tools
   def network(**kwargs):
+    from sciveo.network.tools import NetworkTools
     return NetworkTools(**kwargs)

 except ImportError as e:
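The net effect of this change is that sciveo.monitoring.start and sciveo.network.tools are now imported lazily, only when monitor() or network() is actually called. A minimal usage sketch (the keyword values are illustrative; monitor() forwards its kwargs to MonitorStart and network() to NetworkTools):

  import sciveo

  sciveo.monitor(period=120, block=False)   # imports sciveo.monitoring.start on first use
  net = sciveo.network(localhost=True)      # imports sciveo.network.tools on first use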
{sciveo-0.1.26 → sciveo-0.1.28}/sciveo/cli.py
RENAMED
@@ -14,9 +14,6 @@ import os
 import argparse

 from sciveo.tools.logger import *
-from sciveo.tools.timers import Timer
-from sciveo.monitoring.start import MonitorStart
-from sciveo.network.tools import NetworkTools
 from sciveo.tools.configuration import GlobalConfiguration


@@ -24,7 +21,8 @@ def main():
   config = GlobalConfiguration.get()

   parser = argparse.ArgumentParser(description='sciveo CLI')
-  parser.add_argument('command', choices=['init', 'monitor', 'scan', 'media-server'], help='Command to execute')
+  parser.add_argument('command', choices=['init', 'monitor', 'scan', 'media-server', 'media-run'], help='Command to execute')
+
   parser.add_argument('--period', type=int, default=120, help='Period in seconds')
   parser.add_argument('--block', type=bool, default=True, help='Block flag')
   parser.add_argument('--auth', type=str, default=config['secret_access_key'], help='Auth secret access key')
@@ -32,11 +30,20 @@ def main():
   parser.add_argument('--net', type=str, default=None, help='Network like 192.168.10.0/24')
   parser.add_argument('--port', type=int, default=22, help='Host port number, used for network ops')
   parser.add_argument('--localhost', type=bool, default=False, help='Add localhost to list of hosts')
+  parser.add_argument('--input-path', type=str, help='Input Path')
+  parser.add_argument('--output-path', type=str, help='Output Path')
+  parser.add_argument('--width', type=str, default=None, help='width')
+  parser.add_argument('--height', type=str, default=None, help='height')
+  parser.add_argument('--rate', type=int, help='Rate number')
+  parser.add_argument('--processor', type=str, help='Processor name')
+
   args = parser.parse_args()

   if args.command == 'monitor':
+    from sciveo.monitoring.start import MonitorStart
     MonitorStart(period=args.period, block=args.block)()
   elif args.command == 'scan':
+    from sciveo.network.tools import NetworkTools
     NetworkTools(timeout=args.timeout, localhost=args.localhost).scan_port(port=args.port, network=args.net)
   elif args.command == 'init':
     home = os.path.expanduser('~')
@@ -56,6 +63,10 @@ def main():
   elif args.command == 'media-server':
     from sciveo.media.pipelines.server import __START_SCIVEO_MEDIA_SERVER__
     __START_SCIVEO_MEDIA_SERVER__()
+  elif args.command == 'media-run':
+    if args.processor == "audio-plot":
+      from sciveo.media.pipelines.processors.audio.audio_extractor_process import plot_audio
+      plot_audio(args.width, args.height, args.rate, args.input_path, args.output_path)
   else:
     warning(args.command, "not implemented")
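The new media-run command is a thin CLI wrapper around plot_audio (the audio processor diff below builds exactly this command line). A rough sketch of what --processor audio-plot ends up executing, with made-up sizes and paths:

  from sciveo.media.pipelines.processors.audio.audio_extractor_process import plot_audio

  # equivalent of: sciveo media-run --processor audio-plot --width 1280 --height 720
  #                --rate 44100 --input-path in.aac --output-path out.png
  plot_audio("1280", "720", 44100, "in.aac", "out.png")  # width/height arrive as strings from argparse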
{sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/audio/audio.py
RENAMED
@@ -50,11 +50,11 @@ class AudioExtract(BaseProcessor):
       exception(e, aac_audio_local_path)

   def plot_new_process(self, w, h, sampling_rate, aac_audio_local_path, image_audio_local_path):
-    cmd = "
+    cmd = "sciveo media-run --processor audio-plot "
     cmd += f"--width {w} --height {h} "
-    cmd += f"--
-    cmd += f"--
-    cmd += f"--
+    cmd += f"--rate {sampling_rate} "
+    cmd += f"--input-path '{aac_audio_local_path}' "
+    cmd += f"--output-path '{image_audio_local_path}' "
     cmd += " 1>audio.log 2>audio-error.log"
     debug("plot_new_process cmd", cmd)
     os.system(cmd)
{sciveo-0.1.26 → sciveo-0.1.28}/sciveo/media/pipelines/processors/audio/audio_extractor_process.py
RENAMED
@@ -9,15 +9,13 @@
 # 2024
 #

-import
-import argparse
+import numpy as np
 from scipy.io import wavfile
 from scipy.signal import butter, lfilter
 import matplotlib.pyplot as plt

 from sciveo.tools.logger import *
 from sciveo.tools.common import *
-from sciveo.media.pipelines.processors.base import *


 class AudioFFT:
@@ -136,16 +134,3 @@ def plot_audio(width, height, sampling_rate, aac_audio_local_path, image_audio_local_path):
     plt.savefig(image_audio_local_path, dpi=dpi, bbox_inches='tight', pad_inches=0)
   except Exception as e:
     exception(e, aac_audio_local_path)
-
-
-parser = argparse.ArgumentParser(description='Audio Extractor')
-
-parser.add_argument('--width', type=str, help='width')
-parser.add_argument('--height', type=str, help='height')
-parser.add_argument('--sampling_rate', type=str, help='aduio sampling rate')
-parser.add_argument('--aac_audio_local_path', type=str, help='aac_audio_local_path')
-parser.add_argument('--image_audio_local_path', type=str, help='image_audio_local_path')
-
-args = parser.parse_args()
-
-plot_audio(args.width, args.height, args.sampling_rate, args.aac_audio_local_path, args.image_audio_local_path)
sciveo-0.1.28/sciveo/ml/evaluation/object_detection.py
@@ -0,0 +1,33 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import math
+import numpy as np
+
+from sciveo.ml.images.object_detection import *
+
+
+"""
+
+Object Detection Evaluation
+
+"""
+
+
+class EvaluateObjectDetection:
+  def __init__(self, X, Y_true, Y_predicted):
+    self.X = X
+    self.Y_true = Y_true
+    self.Y_predicted = Y_predicted
+
+  def evaluate(self):
+    pass
+
sciveo-0.1.28/sciveo/ml/images/object_detection.py
@@ -0,0 +1,90 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import math
+import numpy as np
+
+from sciveo.ml.images.tools import *
+
+
+"""
+Object Detection Bounding Boxes (bbox) of type [x, y, w, h]
+
+If need to use [x1, y1, x2, y2] need to use the inverted convertor bbox_convert_inverted().
+
+IoU between 2 object detections: iou(bbox1, bbox2)
+
+"""
+
+# convert from [x, y, w, h] -> [x1, y1, x2, y2]
+def bbox_convert(bbox):
+  x1 = bbox[0]
+  y1 = bbox[1]
+  x2 = x1 + bbox[2]
+  y2 = y1 + bbox[3]
+  return [x1, y1, x2, y2]
+
+# convert from [x1, y1, x2, y2] -> [x, y, w, h]
+def bbox_convert_inverted(bbox):
+  x = bbox[0]
+  y = bbox[1]
+  w = bbox[2] - x
+  h = bbox[3] - y
+  return [x, y, w, h]
+
+def bbox_norm(bbox, w, h):
+  return (bbox[0] / w, bbox[1] / h, bbox[2] / w, bbox[3] / h)
+
+def bbox_denorm(bbox, w, h):
+  return (int(bbox[0] * w), int(bbox[1] * h), int(bbox[2] * w), int(bbox[3] * h))
+
+def bbox_center(bbox):
+  return (int(bbox[0] + bbox[2] / 2), int(bbox[1] + bbox[3] / 2))
+
+def bbox_area(bbox):
+  return bbox[2] * bbox[3]
+
+def iou(bbox1, bbox2):
+  x1 = max(bbox1[0], bbox2[0])
+  y1 = max(bbox1[1], bbox2[1])
+  x2 = min(bbox1[0] + bbox1[2], bbox2[0] + bbox2[2])
+  y2 = min(bbox1[1] + bbox1[3], bbox2[1] + bbox2[3])
+
+  if x1 < x2 and y1 < y2:
+    a = (x2 - x1) * (y2 - y1)
+  else:
+    a = 0
+
+  a1 = bbox_area(bbox1)
+  a2 = bbox_area(bbox2)
+  return a / (a1 + a2 - a)
+
+def bbox_distance(bbox1, bbox2):
+  return points_distance(bbox_center(bbox1), bbox_center(bbox2))
+
+
+"""
+
+Simple Draw object detectios helpers
+
+"""
+def image_shape(image):
+  return image.shape[1], image.shape[0]
+
+# Draw label bounding boxes of type [x, y, w, h], if [x1, y1, x2, y2] then set convert=False
+def draw_label_bboxes(image, bboxes, color, convert=True):
+  w, h = image_shape(image)
+  for bbox in bboxes:
+    if convert:
+      bbox = bbox_convert(bbox)
+    bbox = bbox_denorm(bbox, w, h)
+    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2, 1)
+  return image
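To make the [x, y, w, h] convention concrete, a small worked example with arbitrary boxes: the two boxes below overlap in a 1x1 region, so iou() returns 1 / (4 + 4 - 1) = 1/7.

  from sciveo.ml.images.object_detection import bbox_convert, bbox_area, iou

  b1 = [0, 0, 2, 2]           # x, y, w, h
  b2 = [1, 1, 2, 2]

  print(bbox_convert(b1))     # [0, 0, 2, 2] as [x1, y1, x2, y2]
  print(bbox_area(b2))        # 4
  print(iou(b1, b2))          # 0.14285714285714285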
sciveo-0.1.28/sciveo/ml/images/tools.py
@@ -0,0 +1,83 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import math
+import numpy as np
+
+
+def points_distance(p1, p2):
+  p1 = np.array(p1)
+  p2 = np.array(p2)
+  return np.linalg.norm(p1 - p2)
+
+def validate_point(p, w, h):
+  x = max(p[0], 0)
+  x = min(x, w)
+  y = max(p[1], 0)
+  y = min(y, h)
+  return (x, y)
+
+# Calc diameter point from a circle point and circle center
+def diameter_point(p1, p2):
+  x = 2 * p2[0] - p1[0]
+  y = 2 * p2[1] - p1[1]
+  return (x, y)
+
+# Center point for a line section (with some ratio for general purpose)
+def line_section_center_point(p1, p2, ratio=0.5):
+  x = int(p1[0] + ratio * (p2[0] - p1[0]))
+  y = int(p1[1] + ratio * (p2[1] - p1[1]))
+  return (x, y)
+
+# Line equation ax + by + c = 0 coefficients
+def line_coeff(l):
+  a = l[0][1] - l[1][1]
+  b = l[1][0] - l[0][0]
+  c = l[0][0] * l[1][1] - l[1][0] * l[0][1]
+  return a, b, c
+
+# 2 Lines intersection point (None, None) when parallel
+def lines_intersection(l1, l2):
+  a1, b1, c1 = line_coeff(l1)
+  a2, b2, c2 = line_coeff(l2)
+
+  d = (a1 * b2 - a2 * b1)
+  if d != 0.0:
+    x = (b1 * c2 - b2 * c1) / d
+    y = (c1 * a2 - c2 * a1) / d
+  else:
+    x, y = None, None
+  return x, y
+
+# Check if p is between p1 and p2 on a line
+def line_point_between(p, p1, p2):
+  return (
+    p[0] >= min(p1[0], p2[0]) and p[0] <= max(p1[0], p2[0])
+    and p[1] >= min(p1[1], p2[1]) and p[1] <= max(p1[1], p2[1])
+  )
+
+# Check a point p is intersection for 2 line segments l1 and l2
+def is_point_segments_intersection(p, l1, l2):
+  return line_point_between(p, l1[0], l1[1]) and line_point_between(p, l2[0], l2[1])
+
+# Angle from radians to degrees convertor
+def angle_rad2deg(angle):
+  return math.fabs(angle * 180 / math.pi)
+
+# Angle between lines in radians
+def lines_angle(l1, l2):
+  v1 = (l1[0][0] - l1[1][0], l1[0][1] - l1[1][1])
+  v2 = (l2[0][0] - l2[1][0], l2[0][1] - l2[1][1])
+  return math.atan2(v1[0], v1[1]) - math.atan2(v2[0], v2[1])
+
+# Angle between lines in degrees
+def lines_angle_deg(l1, l2):
+  return angle_rad2deg(lines_angle(l1, l2))
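A quick worked check of the line helpers (values chosen by hand): the diagonals of a 2x2 square meet at (1, 1) and are perpendicular.

  from sciveo.ml.images.tools import line_coeff, lines_intersection, lines_angle_deg

  l1 = ((0, 0), (2, 2))               # the line y = x
  l2 = ((0, 2), (2, 0))               # the line y = 2 - x

  print(line_coeff(l1))               # (-2, 2, 0), i.e. -2x + 2y + 0 = 0
  print(lines_intersection(l1, l2))   # (1.0, 1.0)
  print(lines_angle_deg(l1, l2))      # 90.0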
sciveo-0.1.28/sciveo/ml/images/transforms.py
@@ -0,0 +1,260 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import numpy as np
+import cv2
+
+
+class BaseImageTransformer:
+  def __init__(self, param=0):
+    self.param = param
+
+  def result(self, image, labels):
+    if labels is None:
+      return image
+    else:
+      return image, labels
+
+
+class RandomTransformer(BaseImageTransformer):
+  def __init__(self, class_name, lower, upper, proba):
+    self.class_name = class_name
+    self.lower = lower
+    self.upper = upper
+    self.proba = proba
+    self.transformer = class_name(1.0)
+
+  def __call__(self, image, labels=None):
+    p = np.random.uniform(0, 1)
+    if p >= (1.0 - self.proba):
+      self.transformer.param = np.random.uniform(self.lower, self.upper)
+      return self.transformer(image, labels)
+    else:
+      return self.result(image, labels)
+
+
+class ConvertColor(BaseImageTransformer):
+  def __init__(self, current='RGB', to='HSV', keep_3ch=True):
+    if not ((current in {'RGB', 'HSV'}) and (to in {'RGB', 'HSV', 'GRAY'})): raise NotImplementedError
+    self.current = current
+    self.to = to
+    self.keep_3ch = keep_3ch
+
+  def __call__(self, image, labels=None):
+    if self.current == 'RGB' and self.to == 'HSV':
+      image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
+    elif self.current == 'RGB' and self.to == 'GRAY':
+      image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
+      if self.keep_3ch:
+        image = np.stack([image] * 3, axis=-1)
+    elif self.current == 'HSV' and self.to == 'RGB':
+      image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
+    elif self.current == 'HSV' and self.to == 'GRAY':
+      image = cv2.cvtColor(image, cv2.COLOR_HSV2GRAY)
+      if self.keep_3ch:
+        image = np.stack([image] * 3, axis=-1)
+    return self.result(image, labels)
+
+
+class ConvertDataType(BaseImageTransformer):
+  def __init__(self, to='uint8'):
+    if not (to == 'uint8' or to == 'float32'): raise ValueError("uint8 or float32 only")
+    self.to = to
+
+  def __call__(self, image, labels=None):
+    if self.to == 'uint8':
+      image = np.round(image, decimals=0).astype(np.uint8)
+    else:
+      image = image.astype(np.float32)
+    return self.result(image, labels)
+
+
+class ConvertTo3Channels(BaseImageTransformer):
+  def __init__(self):
+    pass
+
+  def __call__(self, image, labels=None):
+    if image.ndim == 2:
+      image = np.stack([image] * 3, axis=-1)
+    elif image.ndim == 3:
+      if image.shape[2] == 1:
+        image = np.concatenate([image] * 3, axis=-1)
+      elif image.shape[2] == 4:
+        image = image[:,:,:3]
+    return self.result(image, labels)
+
+
+class Hue(BaseImageTransformer):
+  def __init__(self, delta):
+    if not (-180 <= delta <= 180): raise ValueError("delta shoulbe in [-180, 180]")
+    super().__init__(delta)
+
+  def __call__(self, image, labels=None):
+    image[:, :, 0] = (image[:, :, 0] + self.param) % 180.0
+    return self.result(image, labels)
+
+
+class Saturation(BaseImageTransformer):
+  def __init__(self, factor):
+    if factor <= 0.0: raise ValueError("It must be `factor > 0`.")
+    super().__init__(factor)
+
+  def __call__(self, image, labels=None):
+    image[:,:,1] = np.clip(image[:,:,1] * self.param, 0, 255)
+    return self.result(image, labels)
+
+
+class Brightness(BaseImageTransformer):
+  def __init__(self, delta):
+    super().__init__(delta)
+
+  def __call__(self, image, labels=None):
+    image = np.clip(image + self.param, 0, 255)
+    return self.result(image, labels)
+
+
+class Contrast(BaseImageTransformer):
+  def __init__(self, factor):
+    if factor <= 0.0: raise ValueError("factor <= 0.0")
+    super().__init__(factor)
+
+  def __call__(self, image, labels=None):
+    image = np.clip(127.5 + self.param * (image - 127.5), 0, 255)
+    return self.result(image, labels)
+
+
+class Gamma(BaseImageTransformer):
+  def __init__(self, gamma):
+    if gamma <= 0.0: raise ValueError("gamma <= 0.0")
+    self.gamma = gamma
+    self.gamma_inv = 1.0 / gamma
+    self.lut = np.array([((i / 255.0) ** self.gamma_inv) * 255 for i in np.arange(0, 256)]).astype("uint8")
+
+  def __call__(self, image, labels=None):
+    image = cv2.LUT(image, self.lut)
+    return self.result(image, labels)
+
+
+class RandomGamma(BaseImageTransformer):
+  def __init__(self, lower=0.25, upper=2.0, prob=0.5):
+    if lower >= upper: raise ValueError("lower >= upper")
+    self.lower = lower
+    self.upper = upper
+    self.prob = prob

+  def __call__(self, image, labels=None):
+    p = np.random.uniform(0,1)
+    if p >= (1.0-self.prob):
+      gamma = np.random.uniform(self.lower, self.upper)
+      change_gamma = Gamma(gamma=gamma)
+      return change_gamma(image, labels)
+    else:
+      return self.result(image, labels)
+
+
+class HistogramEqualization(BaseImageTransformer):
+  def __init__(self):
+    pass
+
+  def __call__(self, image, labels=None):
+    image[:,:,2] = cv2.equalizeHist(image[:,:,2])
+    return self.result(image, labels)
+
+
+class RandomHistogramEqualization(BaseImageTransformer):
+  def __init__(self, prob=0.5):
+    self.prob = prob
+    self.equalize = HistogramEqualization()
+
+  def __call__(self, image, labels=None):
+    p = np.random.uniform(0,1)
+    if p >= (1.0-self.prob):
+      return self.equalize(image, labels)
+    else:
+      return self.result(image, labels)
+
+
+class ChannelSwap(BaseImageTransformer):
+  def __init__(self, order):
+    self.order = order
+
+  def __call__(self, image, labels=None):
+    image = image[:,:,self.order]
+    return self.result(image, labels)
+
+
+class RandomChannelSwap(BaseImageTransformer):
+  def __init__(self, prob=0.5):
+    self.prob = prob
+    self.permutations = [(0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
+    self.swap_channels = ChannelSwap(order=(0, 1, 2))
+
+  def __call__(self, image, labels=None):
+    p = np.random.uniform(0,1)
+    if p >= (1.0-self.prob):
+      i = np.random.randint(5)
+      self.swap_channels.order = self.permutations[i]
+      return self.swap_channels(image, labels)
+    else:
+      return self.result(image, labels)
+
+
+class BlackWhite(BaseImageTransformer):
+  def __init__(self, param=0):
+    super().__init__(param)
+
+  def __call__(self, image, labels=None):
+    image = np.transpose(np.tile(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (3, 1, 1)), [1, 2, 0])
+    return self.result(image, labels)
+
+
+class GaussianNoise(BaseImageTransformer):
+  def __init__(self, variance=64, mean=0):
+    super().__init__(variance)
+    self.mean = mean
+
+  def __call__(self, image, labels=None):
+    if image.ndim < 3:
+      image = np.expand_dims(image, axis=-1)
+    image = image.astype(np.float32)
+
+    sigma = self.param ** 0.5
+    noise = np.random.normal(self.mean, sigma, (image.shape[0], image.shape[1]))
+    noise = np.transpose(np.tile(noise, (image.shape[-1], 1, 1)), [1, 2, 0])
+
+    image += noise
+
+    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, dtype=-1)
+    image = image.astype(np.uint8)
+
+    return self.result(image, labels)
+
+
+class Pixelisator(BaseImageTransformer):
+  def __init__(self, k):
+    super().__init__(k)
+
+  def __call__(self, image, labels=None):
+    w = image.shape[0]
+    h = image.shape[1]
+    k = 1.0 / self.param
+    image = cv2.resize(image, (int(w * k), int(h * k)))
+    image = cv2.resize(image, (w, h))
+    return self.result(image, labels)
+
+
+class Blur(BaseImageTransformer):
+  def __init__(self, k):
+    super().__init__(k)
+
+  def __call__(self, image, labels=None):
+    image = cv2.blur(image, (int(self.param), int(self.param)))
+    return self.result(image, labels)
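The transformers share the __call__(image, labels=None) interface from BaseImageTransformer, so they can be chained by hand. A minimal sketch with a random input and arbitrary parameters; the float32/uint8 conversions here avoid uint8 wrap-around on the Brightness add and satisfy the uint8 LUT used by Gamma (which is what ConvertDataType is for in the package):

  import numpy as np
  from sciveo.ml.images.transforms import Brightness, Contrast, RandomGamma

  image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)

  out = Brightness(30)(image.astype(np.float32))               # add a constant, clipped to [0, 255]
  out = Contrast(1.5)(out)                                     # stretch around the 127.5 mid-point
  out = RandomGamma(0.5, 1.5, prob=1.0)(out.astype(np.uint8))  # prob=1.0 always applies a random gamma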