tsadmetrics 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. entorno/bin/activate_this.py +32 -0
  2. entorno/bin/rst2html.py +23 -0
  3. entorno/bin/rst2html4.py +26 -0
  4. entorno/bin/rst2html5.py +33 -0
  5. entorno/bin/rst2latex.py +26 -0
  6. entorno/bin/rst2man.py +27 -0
  7. entorno/bin/rst2odt.py +28 -0
  8. entorno/bin/rst2odt_prepstyles.py +20 -0
  9. entorno/bin/rst2pseudoxml.py +23 -0
  10. entorno/bin/rst2s5.py +24 -0
  11. entorno/bin/rst2xetex.py +27 -0
  12. entorno/bin/rst2xml.py +23 -0
  13. entorno/bin/rstpep2html.py +25 -0
  14. tests/__init__.py +0 -0
  15. tests/test_binary.py +759 -0
  16. tests/test_non_binary.py +371 -0
  17. tsadmetrics/_tsadeval/affiliation/__init__.py +0 -0
  18. tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +86 -0
  19. tsadmetrics/_tsadeval/affiliation/_integral_interval.py +464 -0
  20. tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +68 -0
  21. tsadmetrics/_tsadeval/affiliation/generics.py +135 -0
  22. tsadmetrics/_tsadeval/affiliation/metrics.py +114 -0
  23. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +175 -0
  24. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +50 -0
  25. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +184 -0
  26. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
  27. tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  28. tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +386 -0
  29. tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +362 -0
  30. tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  31. tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  32. tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +165 -0
  33. tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +121 -0
  34. tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  35. tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +61 -0
  36. tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +86 -0
  37. tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +21 -0
  38. tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +85 -0
  39. tsadmetrics/utils.py +10 -4
  40. {tsadmetrics-0.1.4.dist-info → tsadmetrics-0.1.6.dist-info}/METADATA +1 -1
  41. tsadmetrics-0.1.6.dist-info/RECORD +58 -0
  42. tsadmetrics-0.1.6.dist-info/top_level.txt +3 -0
  43. tsadmetrics-0.1.4.dist-info/RECORD +0 -20
  44. tsadmetrics-0.1.4.dist-info/top_level.txt +0 -1
  45. {tsadmetrics-0.1.4.dist-info → tsadmetrics-0.1.6.dist-info}/WHEEL +0 -0
@@ -0,0 +1,114 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ from .generics import (
4
+ infer_Trange,
5
+ has_point_anomalies,
6
+ _len_wo_nan,
7
+ _sum_wo_nan,
8
+ read_all_as_events)
9
+ from ._affiliation_zone import (
10
+ get_all_E_gt_func,
11
+ affiliation_partition)
12
+ from ._single_ground_truth_event import (
13
+ affiliation_precision_distance,
14
+ affiliation_recall_distance,
15
+ affiliation_precision_proba,
16
+ affiliation_recall_proba)
17
+
18
+ def test_events(events):
19
+ """
20
+ Verify the validity of the input events
21
+ :param events: list of events, each represented by a couple (start, stop)
22
+ :return: None. Raise an error for incorrect formed or non ordered events
23
+ """
24
+ if type(events) is not list:
25
+ raise TypeError('Input `events` should be a list of couples')
26
+ if not all([type(x) is tuple for x in events]):
27
+ raise TypeError('Input `events` should be a list of tuples')
28
+ if not all([len(x) == 2 for x in events]):
29
+ raise ValueError('Input `events` should be a list of couples (start, stop)')
30
+ if not all([x[0] <= x[1] for x in events]):
31
+ raise ValueError('Input `events` should be a list of couples (start, stop) with start <= stop')
32
+ if not all([events[i][1] < events[i+1][0] for i in range(len(events) - 1)]):
33
+ raise ValueError('Couples of input `events` should be disjoint and ordered')
34
+
35
def pr_from_events(events_pred, events_gt, Trange):
    """
    Compute the affiliation metrics including the precision/recall in [0,1],
    along with the individual precision/recall probabilities.

    :param events_pred: list of predicted events, each represented by a couple
    indicating the start and the stop of the event
    :param events_gt: list of ground truth events, each represented by a couple
    indicating the start and the stop of the event
    :param Trange: range of the series where events_pred and events_gt are included,
    represented as a couple (start, stop)
    :return: dictionary with precision, recall, and the individual probabilities
    :raises ValueError: if Trange is None or does not include all events,
    if events_gt is empty, or if point anomalies are present
    """
    # testing the inputs
    # test_events(events_pred)
    # test_events(events_gt)

    # Bug fix: this check must come before `Trange[0]` is subscripted below;
    # previously a None Trange raised TypeError instead of the intended ValueError.
    if Trange is None:
        # Trange should be indicated if probabilities are used
        raise ValueError('Trange should be indicated (or inferred with the `infer_Trange` function')

    # other tests
    minimal_Trange = infer_Trange(events_pred, events_gt)
    if not Trange[0] <= minimal_Trange[0]:
        raise ValueError('`Trange` should include all the events')
    if not minimal_Trange[1] <= Trange[1]:
        raise ValueError('`Trange` should include all the events')

    if len(events_gt) == 0:
        raise ValueError('Input `events_gt` should have at least one event')

    if has_point_anomalies(events_pred) or has_point_anomalies(events_gt):
        raise ValueError('Cannot manage point anomalies currently')

    E_gt = get_all_E_gt_func(events_gt, Trange)
    aff_partition = affiliation_partition(events_pred, E_gt)

    # Individual precision probability for each ground-truth event (may be NaN)
    p_precision = [affiliation_precision_proba(Is, J, E)
                   for Is, J, E in zip(aff_partition, events_gt, E_gt)]

    # Individual recall probability for each ground-truth event
    p_recall = [affiliation_recall_proba(Is, J, E)
                for Is, J, E in zip(aff_partition, events_gt, E_gt)]

    # Average precision over non-NaN entries; when every entry is NaN keep
    # the NaN, and (bug fix) return NaN instead of IndexError on an empty list
    if _len_wo_nan(p_precision) > 0:
        p_precision_average = _sum_wo_nan(p_precision) / _len_wo_nan(p_precision)
    else:
        p_precision_average = p_precision[0] if p_precision else float('nan')
    p_recall_average = sum(p_recall) / len(p_recall)

    return {'precision': p_precision_average,
            'recall': p_recall_average,
            'individual_precision_probabilities': p_precision,
            'individual_recall_probabilities': p_recall}
95
+
96
def produce_all_results():
    """
    Produce the affiliation precision/recall for all files
    contained in the `data` repository.

    :return: a dictionary indexed by data names, each containing a dictionary
    indexed by algorithm names, each containing the affiliation metric results
    (precision, recall, individual probabilities)
    """
    # read all the events in folder `data`
    datasets, Tranges = read_all_as_events()
    results = dict()
    for data_name, algorithms in datasets.items():
        # evaluate every algorithm of this dataset against its ground truth
        results[data_name] = {
            algo_name: pr_from_events(events,
                                      algorithms['groundtruth'],
                                      Tranges[data_name])
            for algo_name, events in algorithms.items()
            if algo_name != 'groundtruth'
        }
    return results
@@ -0,0 +1,175 @@
1
+ from .Range import Range
2
+ import time
3
+ import datetime
4
+ import pandas as pd
5
+
6
def load_stream_2_range(stream_data: list, normal_label: int, anomaly_label: int, is_range_name: bool) -> list:
    """Convert a label stream into a list of anomaly Range objects.

    :param stream_data: sequence of labels, one per time index
    :param normal_label: label value marking normal points
    :param anomaly_label: label value marking anomalous points
    :param is_range_name: if True, name each closed range with its 1-based ordinal
    :return: list of Range objects, one per contiguous anomalous run
    """
    return_list = []
    start_id = -1
    end_id = -1
    idx = 0  # renamed from `id`, which shadowed the builtin
    range_id = 1

    prev_val = -2  # sentinel distinct from both labels so the first point is handled

    for val in stream_data:
        if val == anomaly_label and (prev_val == normal_label or prev_val == -2):
            # entering an anomaly range
            start_id = idx
        elif val == normal_label and prev_val == anomaly_label:
            # leaving an anomaly range
            name_buf = str(range_id) if is_range_name else ''
            end_id = idx - 1
            return_list.append(Range(start_id, end_id, name_buf))
            range_id += 1

        idx += 1
        prev_val = val

    if start_id > end_id:  # an anomaly continues until the last point
        # Bug fix: was `Range.Range(...)`, an AttributeError since `Range`
        # is the class itself (imported via `from .Range import Range`).
        # NOTE(review): this trailing range is always named, unlike the loop
        # body which honors is_range_name — kept as-is to preserve behavior.
        return_list.append(Range(start_id, idx - 1, str(range_id)))

    return return_list
33
+
34
+
35
def load_stream_file(filename: str, normal_label: int, anomaly_label: int, is_range_name: bool) -> list:
    """Read a label-stream file (one integer label per line) into anomaly Ranges.

    :param filename: path of the stream file
    :param normal_label: label value marking normal points
    :param anomaly_label: label value marking anomalous points
    :param is_range_name: if True, name each closed range with its 1-based ordinal
    :return: list of Range objects, one per contiguous anomalous run
    """
    return_list = []
    start_id = -1
    end_id = -1
    idx = 0  # renamed from `id`, which shadowed the builtin
    range_id = 1

    prev_val = -2  # sentinel distinct from both labels

    # context manager guarantees the file is closed even if parsing raises
    with open(filename, 'r', encoding='utf-8', newline='') as f:
        for line in f:
            val = int(line.strip().split()[0])

            if val == anomaly_label and (prev_val == normal_label or prev_val == -2):
                # entering an anomaly range
                start_id = idx
            elif val == normal_label and prev_val == anomaly_label:
                # leaving an anomaly range
                name_buf = str(range_id) if is_range_name else ''
                end_id = idx - 1
                # Bug fix: was `Range.Range(...)`, an AttributeError since
                # `Range` is the class (imported via `from .Range import Range`)
                return_list.append(Range(start_id, end_id, name_buf))
                range_id += 1

            idx += 1
            prev_val = val

    if start_id > end_id:  # an anomaly continues until the last point
        return_list.append(Range(start_id, idx - 1, str(range_id)))

    return return_list
78
+
79
def load_range_file(filename: str, time_format: str) -> list:
    """Read a CSV range file into a list of Range objects.

    Each line is `first,last[,name]`. Bounds are parsed as plain integers when
    `time_format` is 'index', otherwise as timestamps in `time_format`.

    :param filename: path of the range file
    :param time_format: 'index' or a strptime-style format string
    :return: list of Range objects in file order
    :raises ValueError: when two consecutive ranges overlap
    """
    return_list = []

    # context manager replaces the manual open/close pair
    with open(filename, 'r', encoding='utf-8', newline='') as f:
        for line in f:
            items = line.strip().split(',')
            if time_format == 'index':
                first_idx = int(items[0])
                last_idx = int(items[1])
            else:
                first_idx = string_to_unixtime(items[0], time_format)
                last_idx = string_to_unixtime(items[1], time_format)

            # optional third column is the range name
            name_buf = str(items[2]) if len(items) > 2 else ''

            # Bug fix: was `Range.Range(...)`, an AttributeError since `Range`
            # is the class itself (imported via `from .Range import Range`)
            return_list.append(Range(first_idx, last_idx, name_buf))

    for idx in range(1, len(return_list)):
        if return_list[idx].get_time()[0] <= return_list[idx - 1].get_time()[1]:
            # Raise instead of the previous print + exit(0): exiting with a
            # success status on malformed input hid the failure from callers.
            raise ValueError(
                "Error: ranges ({},{}) and ({},{}) are overlapped in {}".format(
                    return_list[idx - 1].get_time()[0],
                    return_list[idx - 1].get_time()[1],
                    return_list[idx].get_time()[0],
                    return_list[idx].get_time()[1], filename))

    return return_list
114
+
115
+
116
def unixtime_to_string(epoch: int, format: str) -> str:
    """Render a Unix epoch as a local-time string, e.g. '%Y-%m-%d %I:%M:%S %p'."""
    local_time = datetime.datetime.fromtimestamp(epoch)
    return local_time.strftime(format)
118
+
119
+
120
def string_to_unixtime(timestamp: str, format: str) -> int:
    """Parse a local-time string in `format` into an integer Unix epoch."""
    parsed = datetime.datetime.strptime(timestamp, format)
    return int(time.mktime(parsed.timetuple()))
122
+
123
+
124
def save_range_list(filename: str, range_list: list) -> None:
    """Write ranges to `filename` as CSV lines `first,last,name`.

    :param filename: output path
    :param range_list: Range-like objects exposing get_time() and get_name()
    :return: None
    """
    # context manager replaces the manual open/close pair
    with open(filename, encoding='utf-8', mode='w') as f:
        for single_range in range_list:
            first, last = single_range.get_time()
            # write() instead of writelines(): the argument is one string,
            # not an iterable of lines (writelines happened to work only
            # because a str iterates its characters)
            f.write('{},{},{}\n'.format(first, last, single_range.get_name()))
130
+
131
# Dispatch helper: loads an anomaly file according to its declared type.
def load_file(filename: str, filetype: str) -> list:
    """Load an anomaly file as a list of Range objects.

    :param filename: path of the input file
    :param filetype: 'stream' (one label per line) or 'range' (CSV ranges)
    :return: list of Range objects
    """
    assert filetype in ('range', 'stream')

    if filetype == 'stream':
        # stream files use 1 as the anomalous label and -1 as the normal one
        return load_stream_file(filename, 1, -1, True)
    return load_range_file(filename, 'index')
140
+
141
+
142
def make_attack_file(input_files: list, sep: str, label_featname: str, input_normal_label: int, input_anomalous_label: int,
                     output_stream_file: str, output_range_file: str, output_normal_label: int, output_anomalous_label: int) -> None:
    """Merge the label columns of several CSV files into a stream file and a range file.

    :param input_files: CSV paths whose `label_featname` columns are concatenated
    :param sep: CSV field separator passed to pandas
    :param label_featname: name of the label column
    :param input_normal_label: label value marking normal points in the input
    :param input_anomalous_label: label value marking anomalous points in the input
    :param output_stream_file: path of the per-point label stream to write
    :param output_range_file: path of the CSV range file to write
    :param output_normal_label: label written for normal points
    :param output_anomalous_label: label written for anomalous points
    :return: None. Aborts (without writing the range file) on an unknown label.
    """
    label = []
    for an_input_file in input_files:
        temp_file = pd.read_csv(an_input_file, sep=sep)
        label += temp_file[label_featname].values.tolist()

    with open(output_stream_file, 'w') as f:
        for a_label in label:
            if a_label == input_normal_label:
                f.write('{}\n'.format(output_normal_label))
            elif a_label == input_anomalous_label:
                f.write('{}\n'.format(output_anomalous_label))
            else:
                # Bug fix: the message was built with `+ a_label`, which raises
                # TypeError when labels are numeric (the common pandas case).
                print('There is an unknown label, {}'.format(a_label), flush=True)
                # abort without producing the range file; the `with` block
                # closes the file (the previous explicit f.close() was redundant)
                return

    # NOTE(review): the range file is derived with hard-coded 0/1 labels on the
    # *input* label list, independent of the output labels — kept as-is.
    ranges = load_stream_2_range(label, 0, 1, False)
    save_range_list(output_range_file, ranges)
162
+
163
def save_range_2_stream(filename: str, range_list: list, last_idx: int, normal_label: int, anomalous_label: int) -> None:
    """Write a per-point label stream of length `last_idx` from a list of ranges.

    Indices covered by some range get `anomalous_label`; all others get
    `normal_label`, one label per line.

    :param filename: output path
    :param range_list: disjoint, ordered Range-like objects exposing get_time()
    :param last_idx: number of points to write (indices 0 .. last_idx-1)
    :return: None
    """
    # context manager replaces the manual open/close pair
    with open(filename, encoding='utf-8', mode='w') as f:
        range_id = 0
        for idx in range(last_idx):
            # Advance past every range that ends before idx. The original code
            # advanced at most one range per point *after* labelling, which
            # mislabelled the first point of a range adjacent to the previous
            # one, and raised IndexError on an empty range_list.
            while range_id < len(range_list) and range_list[range_id].get_time()[1] < idx:
                range_id += 1
            if range_id < len(range_list) and range_list[range_id].get_time()[0] <= idx:
                f.write('{}\n'.format(anomalous_label))
            else:
                f.write('{}\n'.format(normal_label))
@@ -0,0 +1,50 @@
1
# To store a single anomaly
class Range:
    """A single anomaly range [first, last], both endpoints inclusive, with an optional name."""

    def __init__(self, first, last, name):
        self._first_timestamp = first
        self._last_timestamp = last
        self._name = name

    def set_time(self, first, last):
        """Replace both endpoints of the range."""
        self._first_timestamp = first
        self._last_timestamp = last

    def get_time(self):
        """Return the (first, last) endpoints of the range."""
        return self._first_timestamp, self._last_timestamp

    def set_name(self, name):
        # parameter renamed from `str`, which shadowed the builtin
        self._name = name

    def get_name(self):
        return self._name

    def get_len(self):
        """Number of points covered; both endpoints are inclusive."""
        return self._last_timestamp - self._first_timestamp + 1

    def __eq__(self, other):
        # equality compares endpoints only, the name is ignored
        return self._first_timestamp == other.get_time()[0] and self._last_timestamp == other.get_time()[1]

    def distance(self, other_range) -> int:
        """Gap between the two ranges; 0 when they overlap."""
        if min(self._last_timestamp, other_range.get_time()[1]) - max(self._first_timestamp, other_range.get_time()[0]) > 0:
            return 0
        return min(abs(self._first_timestamp - other_range.get_time()[1]),
                   abs(self._last_timestamp - other_range.get_time()[0]))

    def compare(self, other_range) -> int:
        """Return 0 if the ranges overlap, -1 if self ends before other starts, else 1."""
        if min(self._last_timestamp, other_range.get_time()[1]) - max(self._first_timestamp, other_range.get_time()[0]) > 0:
            return 0
        elif self._last_timestamp - other_range.get_time()[0] < 0:
            return -1
        else:
            return 1

    def stream_2_ranges(self, prediction_stream: list) -> list:
        """Convert a 0/1 prediction stream into a list of Range objects.

        :param prediction_stream: sequence of 0 (normal) / 1 (anomalous) labels
        :return: list of Ranges, one per 0->1 .. 1->0 transition pair
        """
        result = []
        # Bug fix: start_time was reset to 0 on every loop iteration, so every
        # emitted range started at index 0 instead of its real start index.
        start_time = 0
        for i in range(len(prediction_stream) - 1):
            if prediction_stream[i] == 0 and prediction_stream[i + 1] == 1:
                start_time = i + 1
            elif prediction_stream[i] == 1 and prediction_stream[i + 1] == 0:
                result.append(Range(start_time, i, ''))
        return result
@@ -0,0 +1,184 @@
1
+ import numpy as np
2
+ import cv2 as cv
3
+ from copy import deepcopy
4
+ import pathlib
5
+
6
def convert_index(org_index, max_index, graph_width, margin_left):
    """Map a data index onto an x pixel coordinate inside the graph area."""
    scaled = (org_index / max_index) * graph_width
    return round(scaled + margin_left)
8
+
9
def draw_csv(ranges, img, h_floor, h_ceiling, color, max_index, graph_width, margin_left):
    """Draw each range as a filled rectangle between h_floor and h_ceiling."""
    for a_range in ranges:
        first, last = a_range.get_time()
        x_start = convert_index(first, max_index, graph_width, margin_left)
        x_end = convert_index(last, max_index, graph_width, margin_left)
        # thickness=-1 fills the rectangle
        cv.rectangle(img, (x_start, h_floor), (x_end, h_ceiling), color, thickness=-1)
14
+
15
def draw_csv_range(ranges, img, h_floor, h_ceiling, color, start, end):
    """Draw the ranges overlapping the index window [start, end] as filled rectangles.

    :param ranges: Range-like objects exposing get_time()
    :param img: image buffer drawn into by OpenCV
    :param start: first index of the visible window
    :param end: last index of the visible window
    """
    for a_range in ranges:
        first, last = a_range.get_time()
        # Bug fix: the overlap test used `or`, which is true for almost every
        # range; a range intersects [start, end] iff first <= end AND last >= start.
        if first <= end and last >= start:
            cv.rectangle(img, (first - start + 10, h_floor), (last - start + 10, h_ceiling), color, thickness=-1)
19
+
20
def shift_ranges(ranges, first_idx):
    """Shift every range in place so that index `first_idx` becomes 0."""
    for a_range in ranges:
        first, last = a_range.get_time()
        a_range.set_time(first - first_idx, last - first_idx)
23
+
24
def draw_graphs(anomalies, predictions, how_show: str):
    """Render ground-truth anomalies and predictions as two horizontal bar tracks.

    :param anomalies: list of Range-like objects (ground truth); assumed non-empty
    and ordered — TODO confirm with callers
    :param predictions: list of Range-like objects (detector output); same assumption
    :param how_show: 'screen' shows a window, 'file' writes ../../brief_result.png,
    'all' does both; anything else prints 'Parameter Error'
    :return: None. Blocks on cv.waitKey(0) until a key is pressed.
    """
    method_list = [ 'Anomalies', 'Predictions' ]
    # work on copies: shift_ranges mutates the ranges in place
    anomalies = deepcopy(anomalies)
    predictions = deepcopy(predictions)
    # overall index span of both tracks (with a small left offset)
    first_idx = min(anomalies[0].get_time()[0]-100, predictions[0].get_time()[0])
    last_idx = max(anomalies[-1].get_time()[1], predictions[-1].get_time()[1])
    # 1% of the span used as margin on each side
    marginal_idx = int(float(last_idx-first_idx)/100)
    first_idx -= marginal_idx
    # rebase every range so first_idx maps to 0
    shift_ranges(anomalies, first_idx)
    shift_ranges(predictions, first_idx)
    ranges_list = [ anomalies, predictions ]
    max_index = max(anomalies[-1].get_time()[1], predictions[-1].get_time()[1]) + marginal_idx

    # presumably BGR tuples (OpenCV convention) — the inline names suggest so
    color_list = [(70, 70, 70), #black
                  (60, 76, 203), #red
                  (193, 134, 46), #blue
                  (133, 160, 22), #green
                  (206, 143, 187), #purple
                  (94, 73, 52), # darkblue
                  (63, 208, 244) #yellow
                  ]

    # layout constants, in pixels
    margin_left = 10
    margin_right = 150
    margin_top = 20
    margin_bottom = 40

    graph_gap = 20
    graph_height = 40
    graph_width = 2000

    # one track for anomalies, one for predictions
    n_results = 2

    width = margin_left + graph_width + margin_right
    height = margin_top + margin_bottom + n_results * (graph_gap + graph_height)
    bpp = 3

    # white canvas (height x width, 3 channels)
    img = np.ones((height, width, bpp), np.uint8)*255

    img_h = img.shape[0]
    img_w = img.shape[1]
    img_bpp = img.shape[2]  # (unused)

    thickness = 1
    fontsize = 1
    cv.line(img, (int(margin_left/2), img_h-margin_bottom), (img_w-int(margin_left/2), img_h-margin_bottom), color_list[0], thickness) #x-axis
    pts = np.array([[img_w-int(margin_left/2), img_h-margin_bottom], [img_w-int(margin_left/2)-7, img_h-margin_bottom+5], [img_w-int(margin_left/2)-7, img_h-margin_bottom-5]], np.int32) #arrow_head
    pts = pts.reshape((-1, 1, 2))
    cv.fillPoly(img, [pts], color_list[0])
    cv.putText(img, 'Relative Index', (img_w-180, img_h-15), cv.FONT_HERSHEY_COMPLEX_SMALL, fontsize, color_list[0], 1, cv.LINE_AA) #x-axis label

    # axis ticks every tenth of the graph width, labelled in thousands ('K')
    for i in range(margin_left, width-margin_right, int(graph_width/10)):
        cv.line(img, (i, img_h-margin_bottom+2), (i, img_h-margin_bottom-2), color_list[0], thickness)
        org_index = str(round((i-10) / graph_width * max_index / 1000))
        cv.putText(img, org_index+'K', (i-len(org_index)*5, img_h-margin_bottom + 25), cv.FONT_HERSHEY_COMPLEX_SMALL, fontsize, color_list[0], 1, cv.LINE_AA)

    # filled rectangles from here on
    thickness = -1
    for idx in range(n_results):
        # track label on the right margin, then the track's ranges
        cv.putText(img, method_list[idx],
                   (width - margin_right + 2, img_h - margin_bottom - graph_gap * (idx+1) - graph_height * idx - 12),
                   cv.FONT_HERSHEY_COMPLEX_SMALL, fontsize, color_list[0], 1, cv.LINE_AA)
        draw_csv(ranges_list[idx], img, h_floor=img_h - margin_bottom - graph_gap * (idx+1) - graph_height * idx,
                 h_ceiling=img_h - margin_bottom - graph_gap * (idx+1) - graph_height * (idx+1),
                 color=color_list[(idx+1)%len(color_list)],
                 max_index=max_index, graph_width=graph_width, margin_left=margin_left)

    if how_show == 'screen' or how_show == 'all':
        cv.imshow("drawing", img)
    if how_show == 'file' or how_show == 'all':
        cv.imwrite("../../brief_result.png", img)
    if how_show != 'screen' and how_show != 'all' and how_show != 'file':
        print('Parameter Error')
    # NOTE(review): waitKey runs even in 'file' mode, blocking until a keypress
    cv.waitKey(0);
97
+
98
+
99
def draw_multi_graphs(anomalies, predictions_list, predictions_name_list, how_show: str):
    """Render ground-truth anomalies plus several prediction tracks as bar graphs.

    :param anomalies: list of Range-like objects (ground truth); assumed non-empty
    and ordered — TODO confirm with callers
    :param predictions_list: list of prediction tracks, each a list of Range-like objects
    :param predictions_name_list: one display name per prediction track
    :param how_show: 'screen' shows a window, 'file' writes ./brief_result.png,
    'all' does both; anything else prints 'Parameter Error'
    :return: None. Blocks on cv.waitKey(0) until a key is pressed.
    """
    method_list = [ 'Anomalies' ] + predictions_name_list

    # work on copies: shift_ranges mutates the ranges in place
    anomalies = deepcopy(anomalies)
    predictions_list = deepcopy(predictions_list)

    # overall index span across the anomalies and every prediction track
    first_idx = anomalies[0].get_time()[0]-100
    last_idx = anomalies[-1].get_time()[1]
    for single_prediction in predictions_list:
        first_idx = min(first_idx, single_prediction[0].get_time()[0])
        last_idx = max(last_idx, single_prediction[-1].get_time()[1])

    # 1% of the span used as margin
    marginal_idx = int(float(last_idx-first_idx)/100)
    first_idx -= marginal_idx
    # rebase every range so first_idx maps to 0
    shift_ranges(anomalies, first_idx)
    for single_prediction in predictions_list:
        shift_ranges(single_prediction, first_idx)

    ranges_list = [ anomalies ] + predictions_list

    max_index = anomalies[-1].get_time()[1]
    for single_prediction in predictions_list:
        max_index = max(max_index, single_prediction[-1].get_time()[1])
    max_index = max_index + marginal_idx

    # presumably BGR tuples (OpenCV convention) — the inline names suggest so
    color_list = [(0, 0, 0), #black
                  (60, 76, 203), #red
                  (193, 134, 46), #blue
                  (133, 160, 22), #green
                  (206, 143, 187), #purple
                  (94, 73, 52), # darkblue
                  (63, 208, 244) #yellow
                  ]

    # layout constants, in pixels (wider right margin than draw_graphs for longer names)
    margin_left = 10
    margin_right = 180
    margin_top = 20
    margin_bottom = 40

    graph_gap = 20
    graph_height = 40
    graph_width = 2000

    # one track per entry: anomalies first, then each prediction
    n_results = len(ranges_list)

    width = margin_left + graph_width + margin_right
    height = margin_top + margin_bottom + n_results * (graph_gap + graph_height)
    bpp = 3

    # white canvas (height x width, 3 channels)
    img = np.ones((height, width, bpp), np.uint8)*255

    img_h = img.shape[0]
    img_w = img.shape[1]
    img_bpp = img.shape[2]  # (unused)

    thickness = 1
    fontsize = 1.4
    cv.line(img, (int(margin_left/2), img_h-margin_bottom), (img_w-int(margin_left/2), img_h-margin_bottom), color_list[0], thickness) #x-axis
    pts = np.array([[img_w-int(margin_left/2), img_h-margin_bottom], [img_w-int(margin_left/2)-7, img_h-margin_bottom+5], [img_w-int(margin_left/2)-7, img_h-margin_bottom-5]], np.int32) #arrow_head
    pts = pts.reshape((-1, 1, 2))
    cv.fillPoly(img, [pts], color_list[0])
    cv.putText(img, 'Relative Index', (img_w-180, img_h-15), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color_list[0], 1, cv.LINE_AA) #x-axis label

    # axis ticks every tenth of the graph width, labelled in thousands ('K')
    for i in range(margin_left, width-margin_right, int(graph_width/10)):
        cv.line(img, (i, img_h-margin_bottom+2), (i, img_h-margin_bottom-2), color_list[0], thickness)
        org_index = str(round((i-10) / graph_width * max_index / 1000))
        cv.putText(img, org_index+'K', (i-len(org_index)*5, img_h-margin_bottom + 25), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color_list[0], 1, cv.LINE_AA)

    # filled rectangles from here on
    thickness = -1
    for idx in range(n_results):
        # track label on the right margin, then the track's ranges
        cv.putText(img, method_list[idx],
                   (width - margin_right + 2, img_h - margin_bottom - graph_gap * (idx+1) - graph_height * idx - 12),
                   cv.FONT_HERSHEY_COMPLEX_SMALL, fontsize, color_list[0], 1, cv.LINE_AA)
        draw_csv(ranges_list[idx], img, h_floor=img_h - margin_bottom - graph_gap * (idx+1) - graph_height * idx,
                 h_ceiling=img_h - margin_bottom - graph_gap * (idx+1) - graph_height * (idx+1),
                 color=color_list[(idx+1)%len(color_list)],
                 max_index=max_index, graph_width=graph_width, margin_left=margin_left)

    if how_show == 'screen' or how_show == 'all':
        cv.imshow("drawing", img)
    if how_show == 'file' or how_show == 'all':
        print("The file is saved at " + str(pathlib.Path(__file__).parent.absolute()))
        cv.imwrite("./brief_result.png", img)
    if how_show != 'screen' and how_show != 'all' and how_show != 'file':
        print('Parameter Error')
    # NOTE(review): waitKey runs even in 'file' mode, blocking until a keypress
    cv.waitKey(0);
File without changes
File without changes