sciveo 0.1.25__tar.gz → 0.1.27__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sciveo-0.1.25 → sciveo-0.1.27}/PKG-INFO +1 -1
- {sciveo-0.1.25/sciveo/media/ml → sciveo-0.1.27/sciveo}/__init__.py +2 -2
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/cli.py +2 -3
- sciveo-0.1.27/sciveo/media/ml/nlp/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/ml/time_series/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/layouts/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/postprocessors/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/audio/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/file/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/image/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/nlp/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/sci/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/sci/time_series/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/sci/time_series/predictor.py +108 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/sci/time_series/trainer.py +185 -0
- sciveo-0.1.27/sciveo/media/pipelines/processors/video/__init__.py +0 -0
- sciveo-0.1.27/sciveo/media/pipelines/web/__init__.py +0 -0
- sciveo-0.1.27/sciveo/ml/__init__.py +0 -0
- sciveo-0.1.27/sciveo/ml/evaluation/__init__.py +0 -0
- sciveo-0.1.27/sciveo/ml/evaluation/object_detection.py +33 -0
- sciveo-0.1.27/sciveo/ml/images/__init__.py +0 -0
- sciveo-0.1.27/sciveo/ml/images/object_detection.py +90 -0
- sciveo-0.1.27/sciveo/ml/images/tools.py +83 -0
- sciveo-0.1.27/sciveo/ml/images/transforms.py +260 -0
- sciveo-0.1.27/sciveo/ml/nlp/__init__.py +0 -0
- sciveo-0.1.27/sciveo/monitoring/__init__.py +0 -0
- sciveo-0.1.27/sciveo/network/__init__.py +0 -0
- sciveo-0.1.27/sciveo/tools/__init__.py +0 -0
- sciveo-0.1.27/sciveo/tools/aws/__init__.py +0 -0
- sciveo-0.1.27/sciveo/version.py +2 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo.egg-info/PKG-INFO +1 -1
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo.egg-info/SOURCES.txt +11 -0
- sciveo-0.1.25/sciveo/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/ml/encoders/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/ml/nlp/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/ml/time_series/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/layouts/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/postprocessors/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/processors/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/processors/audio/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/processors/file/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/processors/image/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/processors/nlp/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/processors/sci/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/processors/video/__init__.py +0 -56
- sciveo-0.1.25/sciveo/media/pipelines/web/__init__.py +0 -56
- sciveo-0.1.25/sciveo/tools/aws/__init__.py +0 -56
- sciveo-0.1.25/sciveo/version.py +0 -2
- {sciveo-0.1.25 → sciveo-0.1.27}/README.md +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/api/__init__.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/api/base.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/api/upload.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/common/__init__.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/common/configuration.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/common/model.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/common/optimizers.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/common/sampling.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/content/__init__.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/content/dataset.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/content/experiment.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/content/project.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/content/runner.py +0 -0
- {sciveo-0.1.25/sciveo/monitoring → sciveo-0.1.27/sciveo/media}/__init__.py +0 -0
- {sciveo-0.1.25/sciveo/network → sciveo-0.1.27/sciveo/media/ml}/__init__.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/ml/base.py +0 -0
- {sciveo-0.1.25/sciveo/tools → sciveo-0.1.27/sciveo/media/ml/encoders}/__init__.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/ml/encoders/base.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/ml/encoders/normalizer.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/ml/nlp/search.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/ml/time_series/dataset.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/ml/time_series/predictor.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/ml/time_series/trainer.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/ml/time_series/window_generator.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/base.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/job_daemon.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/layouts/base.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/pipeline.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/postprocessors/base.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/postprocessors/default.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/audio/audio.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/audio/audio_extractor_process.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/aws.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/base.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/file/archive.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/album.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/album_in_image.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/depth_esimation.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/embeddings.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/filters.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/generators.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/histogram.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/mask.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/resize.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/segmentation.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/image/watermark.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/media_info.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/nlp/address.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/qr.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/sci/base.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/sci/dataset.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/tpu_base.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/video/generators.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/video/motion_detection.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/video/resize.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/video/video_album.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/video/video_frames.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/processors/video/video_resample.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/queues.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/server.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/media/pipelines/web/server.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/monitoring/monitor.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/monitoring/start.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/network/camera.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/network/sniffer.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/network/tools.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/array.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/aws/priority_queue.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/aws/s3.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/common.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/compress.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/configuration.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/crypto.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/daemon.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/formating.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/hardware.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/http.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/logger.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/os.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/random.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/remote.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/simple_counter.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/synchronized.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo/tools/timers.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo.egg-info/dependency_links.txt +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo.egg-info/entry_points.txt +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo.egg-info/requires.txt +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/sciveo.egg-info/top_level.txt +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/setup.cfg +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/setup.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/test/test_compress.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/test/test_configuration.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/test/test_crypto.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/test/test_monitoring.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/test/test_runner.py +0 -0
- {sciveo-0.1.25 → sciveo-0.1.27}/test/test_sampling.py +0 -0
{sciveo-0.1.25/sciveo/media/ml → sciveo-0.1.27/sciveo}/__init__.py

@@ -20,8 +20,6 @@ try:
   from sciveo.tools.daemon import TasksDaemon, __upload_content__
   from sciveo.content.runner import ProjectRunner
   from sciveo.content.dataset import Dataset
-  from sciveo.monitoring.start import MonitorStart
-  from sciveo.network.tools import NetworkTools
   from sciveo.version import __version__
 
 
@@ -46,10 +44,12 @@ try:
 
   # Monitoring start
   def monitor(**kwargs):
+    from sciveo.monitoring.start import MonitorStart
     MonitorStart(**kwargs)()
 
   # Network tools
   def network(**kwargs):
+    from sciveo.network.tools import NetworkTools
     return NetworkTools(**kwargs)
 
 except ImportError as e:
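Note on the change above: the monitoring and network imports now happen inside monitor() and network(), so a plain "import sciveo" no longer requires those subsystems' dependencies; they are only loaded when the helpers are called. A minimal usage sketch (the keyword values are illustrative; the keyword names come from the cli.py hunks below):

  import sciveo

  # MonitorStart is imported only when monitor() runs
  sciveo.monitor(period=30, block=False)

  # NetworkTools is imported only when network() runs
  net = sciveo.network(timeout=1, localhost=True)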
{sciveo-0.1.25 → sciveo-0.1.27}/sciveo/cli.py

@@ -14,9 +14,6 @@ import os
 import argparse
 
 from sciveo.tools.logger import *
-from sciveo.tools.timers import Timer
-from sciveo.monitoring.start import MonitorStart
-from sciveo.network.tools import NetworkTools
 from sciveo.tools.configuration import GlobalConfiguration
 
 
@@ -35,8 +32,10 @@ def main():
   args = parser.parse_args()
 
   if args.command == 'monitor':
+    from sciveo.monitoring.start import MonitorStart
     MonitorStart(period=args.period, block=args.block)()
   elif args.command == 'scan':
+    from sciveo.network.tools import NetworkTools
     NetworkTools(timeout=args.timeout, localhost=args.localhost).scan_port(port=args.port, network=args.net)
   elif args.command == 'init':
     home = os.path.expanduser('~')
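The cli.py change applies the same deferred-import pattern per command. For reference, the two commands reduce to the calls below; the argument values are illustrative, while the argument names mirror the argparse attributes used above:

  from sciveo.monitoring.start import MonitorStart
  from sciveo.network.tools import NetworkTools

  # 'monitor' command: start the monitoring loop
  MonitorStart(period=30, block=True)()

  # 'scan' command: scan a port across a network range
  NetworkTools(timeout=1, localhost=False).scan_port(port=22, network="192.168.0.0/24")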
File without changes ×12 (the new, empty __init__.py packages listed above)
sciveo-0.1.27/sciveo/media/pipelines/processors/sci/time_series/predictor.py (new file)

@@ -0,0 +1,108 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import os
+import boto3
+
+from sciveo.tools.logger import *
+from sciveo.media.pipelines.processors.sci.base import *
+from sciveo.media.ml.time_series.predictor import *
+
+
+class TimeSeriesPredictorProcessor(SciBaseProcessor):
+  def __init__(self, processor_config, max_progress) -> None:
+    super().__init__(processor_config, max_progress)
+
+    self.default.update({
+      "time_column": "Date Time",
+      "time_format": '%d.%m.%Y %H:%M:%S'
+    })
+
+  def run(self, job, input):
+    progress_per_media = self.max_progress / max(1, len(input))
+    debug("run", job["id"], progress_per_media, "input", input)
+    next_media = []
+    try:
+      if job['content_type'] == "project":
+        project_path = os.path.join(self.base_tmp_path, "projects", job['content_id'])
+        mkdirs(project_path)
+
+        self.download_content(job, project_path)
+        MediaJobState.queue().inc_progress(job["id"], 10)
+
+        list_content = {}
+
+        dataset_df = []
+        for content in job['content']:
+          list_content.setdefault(content["content_type"], []).append(content)
+
+          if content["content_type"] == "file":
+            if content['key'].split(".")[-1] == "timeseries":
+              list_content.setdefault("timeseries", []).append(content)
+
+          if content["content_type"] == "dataset":
+            # Datasets with S3 located files
+            if "bucket" in content and "key" in content:
+              remote_path, local_path, local_file_name = self.content_path(content, project_path)
+              file_extension = local_file_name.split(".")[-1]
+
+              if file_extension == "csv":
+                df = pd.read_csv(local_path)
+                x_col = content['data'].get("dataset", {}).get("x_col", self["time_column"])
+                df.set_index(x_col, inplace=True)
+                dataset_df.append(df)
+
+        dataset_df = pd.concat(dataset_df).drop_duplicates(keep='first').sort_index().reset_index()
+
+        content_project = list_content["project"][0]
+
+        MediaJobState.queue().inc_progress(job["id"], 5)
+
+        if "timeseries" in list_content:
+          content_predictor = self.new_content(content_project, "prediction", name=f"Prediction on {dataset_df.shape}")
+          next_media.append(content_predictor)
+
+          text_content = self.content_text(content_predictor, "timeseries predictions plots", f"""Time series predictions.\n\n{self.describe_df(dataset_df)}""")
+          next_media.append(text_content)
+
+          content = list_content["timeseries"][0]
+          remote_path, local_path, local_file_name = self.content_path(content, project_path)
+          predictor = TimeSeriesPredictor(local_path)
+          ds = TimeSeriesDataSet(dataset_df, self["time_column"], format=self["time_format"])
+          predictions, X, x_plot = predictor.predict(ds.data)
+
+          MediaJobState.queue().inc_progress(job["id"], 5)
+
+          columns = predictor.model_data["columns"][:11] # TODO: Should be configurable in the Predictor, so plot only few columns
+          plot_progress_inc = 20 / len(columns)
+          for i, y_col in enumerate(columns):
+            image_content = self.content_image(text_content, y_col, project_path)
+            predictor.plot(predictions, X, x_plot, i, image_local_path=image_content["local_path"])
+            image_content["data"] = {
+              "info": {
+                "size": os.path.getsize(image_content["local_path"]),
+                "plot": y_col
+              }
+            }
+            next_media.append(image_content)
+            MediaJobState.queue().inc_progress(job["id"], plot_progress_inc)
+        MediaJobState.queue().inc_progress(job["id"], 10)
+
+      if self["output"]:
+        job["output"] += next_media
+      if self["append-content"]:
+        job["append-content"] += next_media
+    except Exception as e:
+      exception(e)
+    return next_media
+
+  def name(self):
+    return "sci-timeseries-predictor"
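Stripped of the job/progress plumbing, the prediction flow wired together by TimeSeriesPredictorProcessor looks roughly like the sketch below. File names, the time column and the date format are placeholders; the wildcard import mirrors the one used in the processor:

  import pandas as pd
  from sciveo.media.ml.time_series.predictor import *  # TimeSeriesPredictor, TimeSeriesDataSet

  df = pd.read_csv("weather.csv")                      # dataset CSV downloaded by the processor
  ds = TimeSeriesDataSet(df, "Date Time", format='%d.%m.%Y %H:%M:%S')

  predictor = TimeSeriesPredictor("model.timeseries")  # a *.timeseries model file from the job content
  predictions, X, x_plot = predictor.predict(ds.data)

  # One plot per predicted column, as the processor does
  for i, y_col in enumerate(predictor.model_data["columns"][:3]):
    predictor.plot(predictions, X, x_plot, i, image_local_path=f"{y_col}.png")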
sciveo-0.1.27/sciveo/media/pipelines/processors/sci/time_series/trainer.py (new file)

@@ -0,0 +1,185 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import os
+import pandas as pd
+import tensorflow as tf
+from tensorflow.keras.callbacks import Callback
+
+from sciveo.tools.logger import *
+from sciveo.media.pipelines.processors.sci.base import *
+from sciveo.media.ml.time_series.predictor import *
+from sciveo.media.ml.time_series.trainer import *
+
+
+class TrainerProgressCallback(Callback):
+  def __init__(self, job, epochs, max_progress):
+    self.job = job
+    self.progress_per_epoch = 2 * max_progress / epochs
+
+  def on_epoch_end(self, epoch, logs=None):
+    debug("epoch", epoch, "finished")
+    MediaJobState.queue().inc_progress(self.job["id"], self.progress_per_epoch)
+
+
+class TimeSeriesTrainerProcessor(SciBaseProcessor):
+  def __init__(self, processor_config, max_progress) -> None:
+    super().__init__(processor_config, max_progress)
+
+    self.default.update({
+      "model_name": "none",
+      "time_column": "Date Time",
+      "time_format": '%d.%m.%Y %H:%M:%S',
+      "ratios": [["train", 0.70], ["val", 0.20], ["test", 0.10]],
+      "max_epochs": 20, "patience": 2,
+      "window_size": 24,
+      "test_plot_from": 200, "test_plots": 20, "test_plot_column_id": 1
+    })
+
+  def run(self, job, input):
+    progress_per_media = self.max_progress / max(1, len(input))
+    debug("run", job["id"], progress_per_media, "input", input)
+    next_media = []
+    try:
+      if job['content_type'] == "project":
+        project_path = os.path.join(self.base_tmp_path, "projects", job['content_id'])
+        mkdirs(project_path)
+
+        self.download_content(job, project_path)
+        MediaJobState.queue().inc_progress(job["id"], 10)
+
+        list_content = {}
+
+        dataset_df = []
+        for content in job['content']:
+          list_content.setdefault(content["content_type"], []).append(content)
+
+          if content["content_type"] == "file":
+            if content['key'].split(".")[-1] == "timeseries":
+              list_content.setdefault("timeseries", []).append(content)
+
+          if content["content_type"] == "dataset":
+            # Datasets with S3 located files
+            if "bucket" in content and "key" in content:
+              remote_path, local_path, local_file_name = self.content_path(content, project_path)
+              file_extension = local_file_name.split(".")[-1]
+
+              if file_extension == "csv":
+                df = pd.read_csv(local_path)
+                x_col = content['data'].get("dataset", {}).get("x_col", df.keys()[0])
+                df.set_index(x_col, inplace=True)
+                dataset_df.append(df)
+
+        dataset_df = pd.concat(dataset_df).drop_duplicates(keep='first').sort_index().reset_index()
+
+        content_project = list_content["project"][0]
+        MediaJobState.queue().inc_progress(job["id"], 5)
+
+        if len(dataset_df) > 0:
+          content_trainer = self.new_content(content_project, "training", name=f"Training on {dataset_df.shape}")
+          next_media.append(content_trainer)
+
+          ds = TimeSeriesDataSet(dataset_df, self["time_column"], format=self["time_format"])
+          ds.normalize()
+          ds.split(ratios=self["ratios"])
+
+          trainer = TimeSeriesTrainer(ds, self["window_size"], self["window_size"], self["window_size"])
+          trainer.create()
+          progress_callback = TrainerProgressCallback(job, self["max_epochs"], 50)
+          history = trainer.train(self["max_epochs"], self["patience"], progress_callback)
+          trainer_eval = trainer.evaluate()
+
+          model_name, model_path = trainer.save(project_path, self["model_name"])
+
+          model_data = {
+            "eval": trainer_eval,
+            "history": {
+              "loss": history.history["loss"],
+              "val_loss": history.history["val_loss"]
+            }
+          }
+
+          model_content = self.content_file(content_trainer, model_name, model_path, data=model_data)
+          next_media.append(model_content)
+
+          text = f"""
+            Time series model.
+            ....
+            ....
+            model name {model_name}
+            model size {os.path.getsize(model_path)}
+
+            eval
+            {self.df_to_html(pd.DataFrame(trainer_eval))}
+
+            train loss
+            {self.df_to_html(pd.DataFrame({"loss": history.history["loss"], "val_loss": history.history["val_loss"]}))}
+          """
+          text_content = self.content_text(content_trainer, "Train timeseries model", text)
+          next_media.append(text_content)
+
+          # TODO: Create numeric from history.history["loss"], history.history["val_loss"]
+
+          ds.denormalize("test")
+          predictor = TimeSeriesPredictor(model_path)
+
+          # Plot some test predictions
+          columns = predictor.model_data["columns"]
+          plot_progress_inc = 10 / self["test_plots"]
+          y_col = columns[self["test_plot_column_id"]]
+          for i in range(self["test_plot_from"], self["test_plot_from"] + self["test_plots"]):
+            image_content = self.content_image(text_content, y_col, project_path)
+            self.plot_chunk(ds.dataset["test"], predictor, i, self["test_plot_column_id"], image_content["local_path"])
+            image_content["data"] = {
+              "info": {
+                "size": os.path.getsize(image_content["local_path"]),
+                "plot": y_col
+              }
+            }
+            next_media.append(image_content)
+            MediaJobState.queue().inc_progress(job["id"], plot_progress_inc)
+
+        MediaJobState.queue().inc_progress(job["id"], 10)
+
+      if self["output"]:
+        job["output"] += next_media
+      if self["append-content"]:
+        job["append-content"] += next_media
+    except Exception as e:
+      exception(e)
+    return next_media
+
+  def plot_chunk(self, df, predictor, k, i, image_local_path):
+    k += 1
+    L = predictor.model_data["window"]["input_width"]
+    X = df[-(k + 1)*L:-k*L]
+
+    predictions, X, x_plot = predictor.predict(X)
+
+    idx_from = -k * L
+    idx_to = -(k - 1) * L
+    if idx_to != 0:
+      labels = df[idx_from:idx_to]
+    else:
+      labels = df[idx_from:]
+    labels = labels.values.reshape(X.shape).astype('float32')
+    labels = tf.convert_to_tensor(labels)
+
+    predictor.plot(
+      predictions, X, x_plot, i, labels,
+      image_local_path
+    )
+
+  def content_type(self):
+    return None
+
+  def name(self):
+    return "sci-timeseries-trainer"
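Similarly, the training flow in TimeSeriesTrainerProcessor reduces to the sketch below. Defaults are taken from the processor config above; the CSV path is a placeholder, and a plain Keras Callback stands in for the TrainerProgressCallback that the processor wires to the job queue:

  import pandas as pd
  from tensorflow.keras.callbacks import Callback
  from sciveo.media.ml.time_series.predictor import *
  from sciveo.media.ml.time_series.trainer import *

  df = pd.read_csv("weather.csv")
  ds = TimeSeriesDataSet(df, "Date Time", format='%d.%m.%Y %H:%M:%S')
  ds.normalize()
  ds.split(ratios=[["train", 0.70], ["val", 0.20], ["test", 0.10]])

  window_size = 24
  trainer = TimeSeriesTrainer(ds, window_size, window_size, window_size)
  trainer.create()
  history = trainer.train(20, 2, Callback())       # max_epochs=20, patience=2
  trainer_eval = trainer.evaluate()

  model_name, model_path = trainer.save("/tmp/models", "none")
  predictor = TimeSeriesPredictor(model_path)      # reload for test plots / later predictions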
File without changes ×4 (new, empty __init__.py files)
sciveo-0.1.27/sciveo/ml/evaluation/object_detection.py (new file)

@@ -0,0 +1,33 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import math
+import numpy as np
+
+from sciveo.ml.images.object_detection import *
+
+
+"""
+
+Object Detection Evaluation
+
+"""
+
+
+class EvaluateObjectDetection:
+  def __init__(self, X, Y_true, Y_predicted):
+    self.X = X
+    self.Y_true = Y_true
+    self.Y_predicted = Y_predicted
+
+  def evaluate(self):
+    pass
+
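evaluate() is still a stub in this release. For illustration only, a hypothetical IoU-threshold match count built on the imported iou() helper could look like this (the threshold and the greedy matching strategy are assumptions, not part of the package):

  from sciveo.ml.images.object_detection import iou

  # Hypothetical helper: count predicted boxes that overlap a ground-truth box
  # with IoU above a threshold, matching each ground-truth box at most once.
  def match_count(bboxes_true, bboxes_predicted, iou_threshold=0.5):
    matched = 0
    remaining = list(bboxes_predicted)
    for bbox_true in bboxes_true:
      for bbox_pred in remaining:
        if iou(bbox_true, bbox_pred) >= iou_threshold:
          matched += 1
          remaining.remove(bbox_pred)
          break
    return matched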
File without changes ×1 (a new, empty __init__.py)
sciveo-0.1.27/sciveo/ml/images/object_detection.py (new file)

@@ -0,0 +1,90 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import math
+import numpy as np
+
+from sciveo.ml.images.tools import *
+
+
+"""
+Object Detection Bounding Boxes (bbox) of type [x, y, w, h]
+
+If need to use [x1, y1, x2, y2] need to use the inverted convertor bbox_convert_inverted().
+
+IoU between 2 object detections: iou(bbox1, bbox2)
+
+"""
+
+# convert from [x, y, w, h] -> [x1, y1, x2, y2]
+def bbox_convert(bbox):
+  x1 = bbox[0]
+  y1 = bbox[1]
+  x2 = x1 + bbox[2]
+  y2 = y1 + bbox[3]
+  return [x1, y1, x2, y2]
+
+# convert from [x1, y1, x2, y2] -> [x, y, w, h]
+def bbox_convert_inverted(bbox):
+  x = bbox[0]
+  y = bbox[1]
+  w = bbox[2] - x
+  h = bbox[3] - y
+  return [x, y, w, h]
+
+def bbox_norm(bbox, w, h):
+  return (bbox[0] / w, bbox[1] / h, bbox[2] / w, bbox[3] / h)
+
+def bbox_denorm(bbox, w, h):
+  return (int(bbox[0] * w), int(bbox[1] * h), int(bbox[2] * w), int(bbox[3] * h))
+
+def bbox_center(bbox):
+  return (int(bbox[0] + bbox[2] / 2), int(bbox[1] + bbox[3] / 2))
+
+def bbox_area(bbox):
+  return bbox[2] * bbox[3]
+
+def iou(bbox1, bbox2):
+  x1 = max(bbox1[0], bbox2[0])
+  y1 = max(bbox1[1], bbox2[1])
+  x2 = min(bbox1[0] + bbox1[2], bbox2[0] + bbox2[2])
+  y2 = min(bbox1[1] + bbox1[3], bbox2[1] + bbox2[3])
+
+  if x1 < x2 and y1 < y2:
+    a = (x2 - x1) * (y2 - y1)
+  else:
+    a = 0
+
+  a1 = bbox_area(bbox1)
+  a2 = bbox_area(bbox2)
+  return a / (a1 + a2 - a)
+
+def bbox_distance(bbox1, bbox2):
+  return points_distance(bbox_center(bbox1), bbox_center(bbox2))
+
+
+"""
+
+Simple Draw object detectios helpers
+
+"""
+def image_shape(image):
+  return image.shape[1], image.shape[0]
+
+# Draw label bounding boxes of type [x, y, w, h], if [x1, y1, x2, y2] then set convert=False
+def draw_label_bboxes(image, bboxes, color, convert=True):
+  w, h = image_shape(image)
+  for bbox in bboxes:
+    if convert:
+      bbox = bbox_convert(bbox)
+    bbox = bbox_denorm(bbox, w, h)
+    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2, 1)
+  return image
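Quick usage of the bbox helpers above, with boxes in [x, y, w, h] form (values are illustrative; draw_label_bboxes additionally expects normalized boxes and OpenCV available as cv2):

  from sciveo.ml.images.object_detection import *

  bbox1 = [0, 0, 10, 10]   # [x, y, w, h]
  bbox2 = [5, 5, 10, 10]

  print(iou(bbox1, bbox2))           # 25 / (100 + 100 - 25) ≈ 0.143
  print(bbox_convert(bbox2))         # [5, 5, 15, 15], i.e. [x1, y1, x2, y2]
  print(bbox_center(bbox2))          # (10, 10)
  print(bbox_distance(bbox1, bbox2)) # ≈ 7.07, distance between the two centers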
sciveo-0.1.27/sciveo/ml/images/tools.py (new file)

@@ -0,0 +1,83 @@
+#
+# Pavlin Georgiev, Softel Labs
+#
+# This is a proprietary file and may not be copied,
+# distributed, or modified without express permission
+# from the owner. For licensing inquiries, please
+# contact pavlin@softel.bg.
+#
+# 2024
+#
+
+import math
+import numpy as np
+
+
+def points_distance(p1, p2):
+  p1 = np.array(p1)
+  p2 = np.array(p2)
+  return np.linalg.norm(p1 - p2)
+
+def validate_point(p, w, h):
+  x = max(p[0], 0)
+  x = min(x, w)
+  y = max(p[1], 0)
+  y = min(y, h)
+  return (x, y)
+
+# Calc diameter point from a circle point and circle center
+def diameter_point(p1, p2):
+  x = 2 * p2[0] - p1[0]
+  y = 2 * p2[1] - p1[1]
+  return (x, y)
+
+# Center point for a line section (with some ratio for general purpose)
+def line_section_center_point(p1, p2, ratio=0.5):
+  x = int(p1[0] + ratio * (p2[0] - p1[0]))
+  y = int(p1[1] + ratio * (p2[1] - p1[1]))
+  return (x, y)
+
+# Line equation ax + by + c = 0 coefficients
+def line_coeff(l):
+  a = l[0][1] - l[1][1]
+  b = l[1][0] - l[0][0]
+  c = l[0][0] * l[1][1] - l[1][0] * l[0][1]
+  return a, b, c
+
+# 2 Lines intersection point (None, None) when parallel
+def lines_intersection(l1, l2):
+  a1, b1, c1 = line_coeff(l1)
+  a2, b2, c2 = line_coeff(l2)
+
+  d = (a1 * b2 - a2 * b1)
+  if d != 0.0:
+    x = (b1 * c2 - b2 * c1) / d
+    y = (c1 * a2 - c2 * a1) / d
+  else:
+    x, y = None, None
+  return x, y
+
+# Check if p is between p1 and p2 on a line
+def line_point_between(p, p1, p2):
+  return (
+    p[0] >= min(p1[0], p2[0]) and p[0] <= max(p1[0], p2[0])
+    and p[1] >= min(p1[1], p2[1]) and p[1] <= max(p1[1], p2[1])
+  )
+
+# Check a point p is intersection for 2 line segments l1 and l2
+def is_point_segments_intersection(p, l1, l2):
+  return line_point_between(p, l1[0], l1[1]) and line_point_between(p, l2[0], l2[1])
+
+# Angle from radians to degrees convertor
+def angle_rad2deg(angle):
+  return math.fabs(angle * 180 / math.pi)
+
+# Angle between lines in radians
+def lines_angle(l1, l2):
+  v1 = (l1[0][0] - l1[1][0], l1[0][1] - l1[1][1])
+  v2 = (l2[0][0] - l2[1][0], l2[0][1] - l2[1][1])
+  return math.atan2(v1[0], v1[1]) - math.atan2(v2[0], v2[1])
+
+# Angle between lines in degrees
+def lines_angle_deg(l1, l2):
+  return angle_rad2deg(lines_angle(l1, l2))