tsadmetrics 0.1.1__tar.gz → 0.1.3__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in the public registry. It is provided for informational purposes only.
Files changed (33)
  1. tsadmetrics-0.1.3/PKG-INFO +22 -0
  2. tsadmetrics-0.1.3/pyproject.toml +30 -0
  3. {tsadmetrics-0.1.1 → tsadmetrics-0.1.3}/setup.py +3 -4
  4. {tsadmetrics-0.1.1/src → tsadmetrics-0.1.3}/tsadmetrics/__init__.py +2 -4
  5. tsadmetrics-0.1.3/tsadmetrics/_tsadeval/auc_roc_pr_plot.py +295 -0
  6. tsadmetrics-0.1.3/tsadmetrics/_tsadeval/discontinuity_graph.py +109 -0
  7. tsadmetrics-0.1.3/tsadmetrics/_tsadeval/latency_sparsity_aware.py +294 -0
  8. tsadmetrics-0.1.3/tsadmetrics/_tsadeval/metrics.py +698 -0
  9. tsadmetrics-0.1.3/tsadmetrics/_tsadeval/nabscore.py +311 -0
  10. tsadmetrics-0.1.3/tsadmetrics/_tsadeval/tests.py +376 -0
  11. tsadmetrics-0.1.3/tsadmetrics/_tsadeval/threshold_plt.py +30 -0
  12. tsadmetrics-0.1.3/tsadmetrics/_tsadeval/time_tolerant.py +33 -0
  13. tsadmetrics-0.1.3/tsadmetrics/_tsadeval/vus_utils.py +263 -0
  14. {tsadmetrics-0.1.1/src → tsadmetrics-0.1.3}/tsadmetrics/binary_metrics.py +2 -2
  15. {tsadmetrics-0.1.1/src → tsadmetrics-0.1.3}/tsadmetrics/metric_utils.py +1 -1
  16. {tsadmetrics-0.1.1/src → tsadmetrics-0.1.3}/tsadmetrics/non_binary_metrics.py +1 -1
  17. tsadmetrics-0.1.3/tsadmetrics/py.typed +0 -0
  18. tsadmetrics-0.1.3/tsadmetrics.egg-info/PKG-INFO +22 -0
  19. tsadmetrics-0.1.3/tsadmetrics.egg-info/SOURCES.txt +26 -0
  20. tsadmetrics-0.1.3/tsadmetrics.egg-info/requires.txt +13 -0
  21. tsadmetrics-0.1.1/PKG-INFO +0 -9
  22. tsadmetrics-0.1.1/pyproject.toml +0 -16
  23. tsadmetrics-0.1.1/src/tsadmetrics/ts_aware_utils.py +0 -2
  24. tsadmetrics-0.1.1/src/tsadmetrics.egg-info/PKG-INFO +0 -9
  25. tsadmetrics-0.1.1/src/tsadmetrics.egg-info/SOURCES.txt +0 -16
  26. {tsadmetrics-0.1.1 → tsadmetrics-0.1.3}/README.md +0 -0
  27. {tsadmetrics-0.1.1 → tsadmetrics-0.1.3}/setup.cfg +0 -0
  28. {tsadmetrics-0.1.1 → tsadmetrics-0.1.3}/tests/test_binary.py +0 -0
  29. {tsadmetrics-0.1.1 → tsadmetrics-0.1.3}/tests/test_non_binary.py +0 -0
  30. /tsadmetrics-0.1.1/src/tsadmetrics/py.typed → /tsadmetrics-0.1.3/tsadmetrics/_tsadeval/__init__.py +0 -0
  31. {tsadmetrics-0.1.1/src → tsadmetrics-0.1.3}/tsadmetrics/utils.py +0 -0
  32. {tsadmetrics-0.1.1/src → tsadmetrics-0.1.3}/tsadmetrics.egg-info/dependency_links.txt +0 -0
  33. {tsadmetrics-0.1.1/src → tsadmetrics-0.1.3}/tsadmetrics.egg-info/top_level.txt +0 -0
tsadmetrics-0.1.3/PKG-INFO
@@ -0,0 +1,22 @@
+ Metadata-Version: 2.1
+ Name: tsadmetrics
+ Version: 0.1.3
+ Summary: Librería para evaluación de detección de anomalías en series temporales
+ Home-page: https://github.com/pathsko/TSADmetrics
+ Author: Pedro Rafael Velasco Priego
+ Author-email: Pedro Rafael Velasco Priego <i12veprp@uco.es>
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: joblib==1.4.2
+ Requires-Dist: numpy==1.24.4
+ Requires-Dist: pandas==2.0.3
+ Requires-Dist: PATE==0.1.1
+ Requires-Dist: patsy==0.5.6
+ Requires-Dist: python-dateutil==2.9.0.post0
+ Requires-Dist: pytz==2024.1
+ Requires-Dist: scikit-learn==1.3.2
+ Requires-Dist: scipy==1.10.1
+ Requires-Dist: six==1.16.0
+ Requires-Dist: statsmodels==0.14.1
+ Requires-Dist: threadpoolctl==3.5.0
+ Requires-Dist: tzdata==2024.1
tsadmetrics-0.1.3/pyproject.toml
@@ -0,0 +1,30 @@
+ [project]
+ name = "tsadmetrics"
+ version = "0.1.3"
+ description = "Librería para evaluación de detección de anomalías en series temporales"
+ authors = [
+ { name = "Pedro Rafael Velasco Priego", email = "i12veprp@uco.es" }
+ ]
+ dependencies = [
+ "joblib==1.4.2",
+ "numpy==1.24.4",
+ "pandas==2.0.3",
+ "PATE==0.1.1",
+ "patsy==0.5.6",
+ "python-dateutil==2.9.0.post0",
+ "pytz==2024.1",
+ "scikit-learn==1.3.2",
+ "scipy==1.10.1",
+ "six==1.16.0",
+ "statsmodels==0.14.1",
+ "threadpoolctl==3.5.0",
+ "tzdata==2024.1"
+ ]
+ requires-python = '>=3.8'
+
+ [tool.setuptools]
+ packages = ["tsadmetrics","tsadmetrics._tsadeval"]
+
+ [build-system]
+ requires = ["setuptools>=61.0", "wheel"]
+ build-backend = "setuptools.build_meta"
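For readers who want to sanity-check the new packaging metadata, here is a minimal smoke test. It assumes the 0.1.3 release built from this pyproject.toml is installed in the current environment; the snippet itself is not part of the package.

```python
# Hypothetical smoke test: confirm both declared packages import and the
# installed metadata matches the pyproject.toml shown above.
import importlib.metadata

import tsadmetrics
import tsadmetrics._tsadeval  # listed under [tool.setuptools] packages

assert importlib.metadata.version("tsadmetrics") == "0.1.3"
print(tsadmetrics.__version__)  # "0.1.3", set in tsadmetrics/__init__.py below
```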
{tsadmetrics-0.1.1 → tsadmetrics-0.1.3}/setup.py
@@ -5,14 +5,14 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
  setup(
  name="tsadmetrics",
- version="0.1.1",
+ version="0.1.3",
  author="Pedro Rafael Velasco Priego",
  author_email="i12veprp@uco.es",
  description="A library for time series anomaly detection metrics and evaluation.",
  long_description=long_description,
  long_description_content_type="text/markdown",
  url="https://github.com/pathsko/TSADmetrics",
- packages=find_packages(where="src"),
+ packages=find_packages(where='./'),
  classifiers=[
  "Programming Language :: Python :: 3",
  "License :: OSI Approved :: MIT License",
@@ -20,7 +20,7 @@ setup(
  "Intended Audience :: Science/Research",
  "Topic :: Scientific/Engineering :: Artificial Intelligence",
  ],
- python_requires=">=3.6",
+ python_requires=">=3.8",
  install_requires=[
  "joblib==1.4.2",
  "numpy==1.24.4",
@@ -46,5 +46,4 @@ setup(
  "myst-parser",
  ],
  },
- package_dir={"": "src"},
  )
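The net effect of the setup.py change is a switch from the src/ layout to a flat layout: find_packages() now scans the project root and package_dir is gone. A quick way to see what setuptools would discover from an unpacked 0.1.3 sdist (our own snippet, not part of the release):

```python
# Run from the root of the unpacked tsadmetrics-0.1.3 sdist.
from setuptools import find_packages

# With package_dir={"": "src"} removed, discovery is relative to the project root;
# the result should include at least 'tsadmetrics' and 'tsadmetrics._tsadeval'.
print(find_packages(where="./"))
```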
{tsadmetrics-0.1.1/src → tsadmetrics-0.1.3}/tsadmetrics/__init__.py
@@ -2,13 +2,11 @@ from .binary_metrics import *
  from .non_binary_metrics import *
  from .utils import *
 
- import sys
- import os
- sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_tsadeval")))
+
 
 
  __author__ = 'Pedro Rafael Velasco Priego i12veprp@uco.es'
- __version__ = "0.0.1"
+ __version__ = "0.1.3"
  __all__ = ['point_wise_recall', 'point_wise_precision', 'point_wise_f_score','point_adjusted_recall',
  'point_adjusted_precision', 'point_adjusted_f_score', 'segment_wise_recall', 'segment_wise_precision',
  'segment_wise_f_score','delay_th_point_adjusted_recall', 'delay_th_point_adjusted_precision',
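Since 0.1.3 drops the sys.path manipulation and simply re-exports the metric functions, the public surface can be inspected directly from the package, as in the sketch below (the exact metric signatures live in binary_metrics.py and non_binary_metrics.py, which this diff only touches lightly):

```python
import tsadmetrics

print(tsadmetrics.__version__)    # "0.1.3"
print(len(tsadmetrics.__all__))   # number of exported metric names
print(tsadmetrics.__all__[:3])    # ['point_wise_recall', 'point_wise_precision', 'point_wise_f_score']
```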
tsadmetrics-0.1.3/tsadmetrics/_tsadeval/auc_roc_pr_plot.py
@@ -0,0 +1,295 @@
+ # import numpy as np
+ # from scipy.stats import norm
+ # from matplotlib import pyplot as plt
+
+ # from metrics import f1_from_pr
+
+
+ # class Two_1d_normal_distributions:
+ # def __init__(self, P_ampl, N_ampl, P_mu, N_mu, P_std, N_std, color="b", betas=None):
+ # self.P_ampl = P_ampl
+ # self.N_ampl = N_ampl
+ # self.P_mu = P_mu
+ # self.N_mu = N_mu
+ # self.P_std = P_std
+ # self.N_std = N_std
+ # self.betas = (1 / 8, 1 / 4, 1 / 2, 1, 2, 4, 8, 16, 32) if betas == None else betas
+
+ # self.color = color
+ # self.N_color = "k"
+ # self.P_color = "r"
+
+ # def make(self, delta=0.05, steps=10001, start=-8, stop=8):
+ # index = 0
+
+ # # For plotting grahps
+ # self.fpr = []
+ # self.precision = []
+ # self.recall = []
+
+ # # For plotting x´s and o´s on the graphs
+ # self.x_fpr = []
+ # self.x_precision = []
+ # self.x_recall = []
+ # self.xs = 0
+ # self.x_threshold = []
+ # self.o_fpr = []
+ # self.o_precision = []
+ # self.o_recall = []
+ # self.os = 0
+ # self.o_threshold = []
+
+ # # Track maximum f scores for various beta values
+ # self.max_f = {i: 0 for i in self.betas}
+ # self.max_f_fpr = {i: 0 for i in self.betas}
+ # self.max_f_precision = {i: 0 for i in self.betas}
+ # self.max_f_recall = {i: 0 for i in self.betas}
+ # self.max_f_thresholds = {i: 0 for i in self.betas}
+
+ # for threshold in np.linspace(start, stop, steps):
+ # TN = self.N_ampl * norm.cdf(threshold, loc=self.N_mu, scale=self.N_std)
+ # FP = self.N_ampl - TN
+ # FN = self.P_ampl * norm.cdf(threshold, loc=self.P_mu, scale=self.P_std)
+ # TP = self.P_ampl - FN
+ # self.fpr.append(FP / (FP + TN))
+ # self.precision.append(TP / (TP + FP))
+ # self.recall.append(TP / (TP + FN))
+
+ # for beta in self.betas:
+ # if f1_from_pr(p=self.precision[-1], r=self.recall[-1], beta=beta) > self.max_f[beta]:
+ # self.max_f[beta] = f1_from_pr(p=self.precision[-1], r=self.recall[-1], beta=beta)
+ # self.max_f_fpr[beta] = self.fpr[-1]
+ # self.max_f_precision[beta] = self.precision[-1]
+ # self.max_f_recall[beta] = self.recall[-1]
+ # self.max_f_thresholds[beta] = threshold
+
+ # if (FN) / (self.P_ampl) >= self.xs * delta + delta * 0.5:
+ # self.x_fpr.append(FP / (FP + TN))
+ # self.x_precision.append(TP / (TP + FP))
+ # self.x_recall.append(TP / (TP + FN))
+ # self.xs += 1
+ # # print(xs, TN+FN)
+ # self.x_threshold.append(threshold)
+ # if (TN / self.N_ampl) >= self.os * delta + delta * 0.5:
+ # self.o_fpr.append(FP / (FP + TN))
+ # self.o_precision.append(TP / (TP + FP))
+ # self.o_recall.append(TP / (TP + FN))
+ # self.os += 1
+ # self.o_threshold.append(threshold)
+ # # print(os, TN+FN)
+
+ # def plot_roc_pr(self, roc_ax, pr_ax, plot_xs=True, plot_os=True, plot_fs=False):
+ # roc_ax.plot(self.fpr, self.recall, self.color, zorder=1)
+ # pr_ax.plot(self.precision, self.recall, self.color, zorder=1)
+ # if plot_xs:
+ # roc_ax.plot(self.x_fpr, self.x_recall, "x", color=self.color, zorder=1)
+ # pr_ax.plot(self.x_precision, self.x_recall, "x", color=self.color, zorder=1)
+ # if plot_os:
+ # roc_ax.plot(self.o_fpr, self.o_recall, "o", color=self.color, fillstyle="none", zorder=1)
+ # pr_ax.plot(self.o_precision, self.o_recall, "o", color=self.color, fillstyle="none", zorder=1)
+ # if plot_fs:
+ # roc_ax.plot(
+ # list(self.max_f_fpr.values()),
+ # list(self.max_f_recall.values()),
+ # ".",
+ # linestyle="None",
+ # zorder=2,
+ # color="k",
+ # ) # self.color)
+ # pr_ax.plot(
+ # list(self.max_f_precision.values()), list(self.max_f_recall.values()), ".", zorder=2, color="k"
+ # ) # self.color)
+ # for beta in self.betas:
+ # # roc_ax.plot([self.max_f_fpr[beta]],[self.max_f_recall[beta]], marker=f"$1/{int(1/beta)}$" if beta<1 else f"${beta}$", linestyle= "None", zorder=2, color="k")
+ # # pr_ax.plot([self.max_f_precision[beta]], [self.max_f_recall[beta]], marker=f"$1/{int(1/beta)}$" if beta<1 else f"${beta}$", zorder=2, color="k")
+ # if self.color == "forestgreen": # need to place the numbers differently
+ # roc_ax.text(
+ # self.max_f_fpr[beta],
+ # self.max_f_recall[beta],
+ # f" $1/{int(1/beta)}$" if beta < 1 else f" ${beta}$",
+ # horizontalalignment="left",
+ # verticalalignment="top",
+ # color=self.color,
+ # )
+ # pr_ax.text(
+ # self.max_f_precision[beta],
+ # self.max_f_recall[beta],
+ # f"$1/{int(1/beta)}$" if beta < 1 else f"${beta}$",
+ # horizontalalignment="left",
+ # verticalalignment="bottom",
+ # color=self.color,
+ # )
+ # else:
+ # roc_ax.text(
+ # self.max_f_fpr[beta],
+ # self.max_f_recall[beta],
+ # f"$1/{int(1/beta)}$" if beta < 1 else f"${beta}$",
+ # horizontalalignment="right",
+ # verticalalignment="bottom",
+ # color=self.color,
+ # )
+ # pr_ax.text(
+ # self.max_f_precision[beta],
+ # self.max_f_recall[beta],
+ # f"$1/{int(1/beta)}$ " if beta < 1 else f"${beta}$ ",
+ # horizontalalignment="right",
+ # verticalalignment="top",
+ # color=self.color,
+ # )
+
+ # # adjust axes to get the numbers within the figure
+ # xmin, xmax = roc_ax.get_xlim()
+ # xmin, xmax = pr_ax.get_xlim()
+ # roc_ax.set_xlim([xmin - 0.04, xmax])
+ # pr_ax.set_xlim([xmin - 0.01, xmax])
+ # pr_ax.set_xlabel("Precision")
+ # pr_ax.set_ylabel("Recall")
+ # roc_ax.set_xlabel("False positive rate")
+ # roc_ax.set_ylabel("Recall")
+
+ # def plot_roc_prec(self):
+ # plt.plot(self.fpr, self.recall, self.color, zorder=1)
+ # plt.plot(self.fpr, self.precision, self.color, zorder=1)
+ # plt.show()
+
+ # def plot_roc_pr_lines(self, ax):
+ # ax.plot(self.fpr, np.array(self.recall) + 1, self.color, zorder=1)
+ # ax.plot(np.array(self.precision) * (-1) + 1, self.recall, self.color, zorder=1)
+ # for i in range(0, len(self.recall), 5):
+ # ax.plot(
+ # [self.fpr[i], 1 - self.precision[i]],
+ # [self.recall[i] + 1, self.recall[i]],
+ # marker="o",
+ # color=self.N_color,
+ # zorder=1,
+ # alpha=0.3,
+ # )
+
+ # def plot_distributions(
+ # self, axes, start=-5, stop=7, steps=1001, normalize=True, plot_xs=True, plot_os=True, plot_fs=False, threshold=0
+ # ):
+ # grid = np.linspace(start, stop, steps)
+ # fill_alpha = 0.2
+
+ # y = lambda x: norm.pdf(x, loc=self.N_mu, scale=self.N_std) * (1 if normalize else self.N_ampl)
+ # axes[0].plot(
+ # grid,
+ # y(grid),
+ # color=self.N_color,
+ # label=f"pdf_N/{self.N_ampl}",
+ # )
+
+ # axes[0].fill_between(
+ # grid[grid <= threshold], 0, y(grid[grid <= threshold]), alpha=fill_alpha, lw=0, color="darkgreen"
+ # )
+ # axes[0].fill_between(
+ # grid[grid >= threshold], 0, y(grid[grid >= threshold]), alpha=fill_alpha, lw=0, color="orchid"
+ # )
+ # tn_x = min(self.N_mu, threshold - 0.75)
+ # fp_x = max(self.N_mu, threshold + 0.75)
+ # axes[0].text(tn_x, y(tn_x) / 2 - 0.005, "TN", horizontalalignment="center", verticalalignment="top")
+ # axes[0].text(fp_x, y(fp_x) / 2 - 0.005, "FP", horizontalalignment="center", verticalalignment="top")
+ # # add thresholdline, on the whole y-range
+ # ymin, ymax = axes[0].get_ylim()
+ # axes[0].plot([threshold, threshold], [ymin - 1, ymax + 1], "--", color="gray", lw=1)
+ # axes[0].set_ylim([ymin - 0.02, ymax])
+
+ # # same for anomal distributions
+ # y = lambda x: norm.pdf(x, loc=self.P_mu, scale=self.P_std) * (1 if normalize else self.P_ampl)
+ # axes[1].plot(
+ # grid,
+ # y(grid),
+ # color=self.P_color,
+ # label=f"pdf_P/{self.P_ampl}",
+ # )
+ # axes[1].fill_between(
+ # grid[grid <= threshold], 0, y(grid[grid <= threshold]), alpha=fill_alpha, lw=0, color="chocolate"
+ # )
+ # axes[1].fill_between(
+ # grid[grid >= threshold], 0, y(grid[grid >= threshold]), alpha=fill_alpha, lw=0, color="darkcyan"
+ # )
+ # fn_x = min(self.P_mu, threshold - 0.75)
+ # tp_x = max(self.P_mu, threshold + 0.75)
+ # axes[1].text(fn_x, y(fn_x) / 2, "FN", horizontalalignment="center", verticalalignment="top")
+ # axes[1].text(tp_x, y(tp_x) / 2, "TP", horizontalalignment="center", verticalalignment="top")
+
+ # # add thresholdline, on the whole y-range
+ # ymin, ymax = axes[1].get_ylim()
+ # axes[1].plot([threshold, threshold], [ymin - 1, ymax + 1], "--", color="gray", lw=1)
+ # axes[1].set_ylim([ymin, ymax])
+
+
+ # def plot_cdf(self, ax, start=-6, stop=8, steps=1001, normalize=True):
+ # grid = np.linspace(start, stop, steps)
+
+ # ax.plot(
+ # grid,
+ # norm.cdf(grid, loc=self.N_mu, scale=self.N_std) * (1 if normalize else self.N_ampl),
+ # color=self.N_color,
+ # label=f"pdf_N/{self.N_ampl}",
+ # )
+ # ax.plot(
+ # grid,
+ # norm.cdf(grid, loc=self.P_mu, scale=self.P_std) * (1 if normalize else self.P_ampl),
+ # color=self.P_color,
+ # label=f"pdf_P/{self.P_ampl}",
+ # )
+
+
+ # if __name__ == "__main__":
+
+ # # Make detector distributions
+
+ # t1 = Two_1d_normal_distributions(
+ # 1, 49, 1.8, -1, 2, 1, color="mediumblue", betas=(1 / 8, 1 / 4, 1 / 2, 1, 2, 4, 8, 16)
+ # )
+ # t2 = Two_1d_normal_distributions(
+ # 1, 49, 1, -1, 1, 1, color="forestgreen", betas=(1 / 8, 1 / 4, 1 / 2, 1, 2, 4, 8, 16)
+ # )
+
+ # t1.make(steps=1001, delta=0.1)
+ # t2.make(steps=1001, delta=0.1)
+
+
+ # # Make roc and pr plots
+
+ # figsize = (4, 4)
+
+ # roc_fig, roc_ax = plt.subplots(figsize=figsize)
+ # pr_fig, pr_ax = plt.subplots(figsize=figsize)
+ # t1.plot_roc_pr(roc_ax, pr_ax, False, False, True)
+ # t2.plot_roc_pr(roc_ax, pr_ax, False, False, True)
+ # roc_fig.tight_layout()
+ # pr_fig.tight_layout()
+ # roc_fig.savefig("auc_roc_f.pdf")
+ # pr_fig.savefig("auc_pr_f.pdf")
+ # plt.show()
+ # plt.close("all")
+
+
+ # # Make distribution plots
+
+ # figsize = (5, 3)
+
+ # for beta in t1.betas: # [16,4,1,1/4,1/8]:
+ # fig, axes = plt.subplots(2, 2, figsize=figsize, sharex=True, sharey=True)
+ # t1.plot_distributions(
+ # [axes[0][0], axes[1][0]], plot_xs=False, plot_os=False, plot_fs=True, threshold=t1.max_f_thresholds[beta]
+ # )
+ # t2.plot_distributions(
+ # [axes[0][1], axes[1][1]], plot_xs=False, plot_os=False, plot_fs=True, threshold=t2.max_f_thresholds[beta]
+ # )
+
+ # axes[0][0].set_title(f"Blue detector", color=t1.color)
+ # axes[0][1].set_title("Green detector", color=t2.color)
+ # axes[1][0].set_xlabel("Anomaly \n score", color=t1.color)
+ # axes[1][1].set_xlabel("Anomaly \n score", color=t2.color)
+ # shadowaxes = fig.add_subplot(111, xticks=[], yticks=[], frame_on=False)
+ # shadowaxes.set_ylabel("Probability density", labelpad=25)
+ # fig.tight_layout()
+ # axes[0][0].set_ylabel("Normal\nsamples", labelpad=25)
+ # axes[1][0].set_ylabel("Anomalous\nsamples", labelpad=25)
+
+ # plt.subplots_adjust(hspace=0.0)
+ # plt.savefig(f"auc_distributions_b{beta}.pdf")
+ # plt.show()
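The new file ships entirely commented out, but the core idea is worth seeing in runnable form: sweep a decision threshold over two 1-D Gaussian score distributions (normal vs. anomalous samples) and derive FPR, precision and recall analytically from the CDFs. The sketch below is ours, not part of tsadmetrics' API, and it uses the standard F1 formula in place of the module's f1_from_pr helper.

```python
# Standalone sketch of the threshold sweep performed by Two_1d_normal_distributions.make().
import numpy as np
from scipy.stats import norm


def roc_pr_curves(P_ampl, N_ampl, P_mu, N_mu, P_std, N_std, start=-8, stop=8, steps=1001):
    """Return (fpr, precision, recall) arrays for thresholds swept from `start` to `stop`."""
    thresholds = np.linspace(start, stop, steps)
    # Mass of each distribution falling below the threshold (i.e. classified as normal).
    TN = N_ampl * norm.cdf(thresholds, loc=N_mu, scale=N_std)
    FN = P_ampl * norm.cdf(thresholds, loc=P_mu, scale=P_std)
    FP = N_ampl - TN
    TP = P_ampl - FN
    return FP / (FP + TN), TP / (TP + FP), TP / (TP + FN)


# Parameters mirroring the "blue detector" from the module's __main__ block.
fpr, precision, recall = roc_pr_curves(1, 49, 1.8, -1, 2, 1)
print(f"max F1 over thresholds: {np.max(2 * precision * recall / (precision + recall)):.3f}")
```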
tsadmetrics-0.1.3/tsadmetrics/_tsadeval/discontinuity_graph.py
@@ -0,0 +1,109 @@
+ # from maketable import *
+
+
+ # class Score_graphs_table(Table):
+ # def __init__(self, metric_names, results, marks=[]):
+ # self.metric_names = metric_names
+ # self.results = results
+ # self.marks = marks
+ # super().__init__(Table_content([], [], []), scale=2)
+ # self.x_factor = 1 / 20 * self.scale
+ # self.y_factor = 1 / 2
+ # self.y_shift = -0.2
+
+ # self.row_length = 2
+ # self.n_rows = len(metric_names)
+
+ # def add_top_row(self):
+ # self.string += "Metric"
+ # self.string += "&"
+ # self.string += "Score"
+ # self.end_row()
+
+ # def add_next_row(self):
+ # self.string += self.metric_names[self.rows_added]
+ # self.string += "&"
+ # self.add_graph(self.rows_added + 1)
+ # self.end_row()
+ # self.rows_added += 1
+
+ # def add_graph(self, number):
+ # self.add_line(f"\\begin{{tikzpicture}}[baseline=-\\the\\dimexpr\\fontdimen22\\textfont2\\relax]")
+ # for x in self.marks:
+ # self.add_line(
+ # f"\draw[-, gray] ({x*self.x_factor},{self.y_shift}) -- ({x*self.x_factor},{0.2*self.y_factor + self.y_shift});"
+ # )
+ # self.add_line(
+ # f"\draw[-, gray] (0,{self.y_shift}) -- ({self.x_factor*(len(self.results[self.metric_names[number-1]])-1)},{self.y_shift});"
+ # )
+ # self.add_line("\\foreach \\i/\\a in")
+ # self.add_line(
+ # str(
+ # [
+ # (round(i * self.x_factor, 3), round(a * self.y_factor + self.y_shift, 3))
+ # for i, a in enumerate(self.results[self.metric_names[number - 1]])
+ # ]
+ # )
+ # .replace(",", "/")
+ # .replace(")/", ",")
+ # .replace("(", "")
+ # .replace("[", "{")
+ # .replace("]", "}")
+ # .replace(")}", "}{")
+ # )
+ # self.add_line("\\coordinate (now) at (\\i,\\a) {};")
+ # self.add_line(" \\ifthenelse{\\equal{\\i}{0.0}}{}{")
+ # self.add_line(" \\draw[-, teal, thick] (prev) -- (now);")
+ # self.add_line(" }")
+ # self.add_line(" \\coordinate (prev) at (\\i,\\a) {};")
+ # self.add_line("}")
+ # self.add_line("\\end{tikzpicture}")
+
+
+ # def score_graphs():
+ # # define scenario
+ # ts_length = 100
+ # pred_length = 5
+ # gt_length = 20
+ # gt_start = 40
+ # marks = [35, 40, 55, 60]
+ # assert pred_length % 2 == 1
+
+ # # prepare metrics list
+ # All_metrics.remove(metrics.Range_PR)
+ # all_metrics_and_rffront = [*All_metrics, metrics.Range_PR, Range_PR_front]
+
+ # # make results and names
+ # metric_names = []
+ # result = {}
+ # for metric in all_metrics_and_rffront:
+ # # set names
+ # if metric == metrics.TaF:
+ # metric_names.append(metric(5, [3, 4], [3], delta=10).name)
+ # elif metric == metrics.Time_Tolerant:
+ # metric_names.append(metric(5, [3, 4], [3], d=10).name)
+ # else:
+ # metric_names.append(metric(5, [3, 4], [3]).name)
+
+ # # get results
+ # current_result = []
+ # for pred_mid in range(pred_length // 2, ts_length - pred_length // 2):
+ # gt = [[gt_start, gt_start + gt_length - 1]]
+ # pred = [[pred_mid - pred_length // 2, pred_mid + pred_length // 2]]
+ # if metric == metrics.TaF:
+ # current_result.append(metric(ts_length, gt, pred, delta=10).get_score())
+ # elif metric == metrics.Time_Tolerant:
+ # current_result.append(metric(ts_length, gt, pred, d=10).get_score())
+ # else:
+ # current_result.append(metric(ts_length, gt, pred).get_score())
+ # current_result = np.array(current_result)
+ # current_result = (current_result - min(current_result)) / (max(current_result) - min(current_result))
+ # result[metric_names[-1]] = current_result
+
+ # # make the table
+ # table = Score_graphs_table(metric_names, result, marks)
+ # table.write()
+ # print(table)
+
+
+ # score_graphs()
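This module, also shipped fully commented out, builds a TikZ table of score curves: it slides a short predicted anomaly segment across a series containing one ground-truth segment, scores every position with each metric, and min-max normalises the resulting curve. The following self-contained sketch reproduces that scenario with a plain point-wise F1 standing in for the package's metric classes (which this diff does not show); the names and helper here are ours.

```python
# Rough sketch of the sliding-window scenario set up in score_graphs().
import numpy as np


def point_wise_f1(y_true, y_pred):
    tp = np.sum((y_true == 1) & (y_pred == 1))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    fn = np.sum((y_true == 1) & (y_pred == 0))
    return 0.0 if tp == 0 else 2 * tp / (2 * tp + fp + fn)


ts_length, pred_length, gt_start, gt_length = 100, 5, 40, 20
y_true = np.zeros(ts_length, dtype=int)
y_true[gt_start:gt_start + gt_length] = 1

scores = []
for pred_mid in range(pred_length // 2, ts_length - pred_length // 2):
    y_pred = np.zeros(ts_length, dtype=int)
    y_pred[pred_mid - pred_length // 2:pred_mid + pred_length // 2 + 1] = 1
    scores.append(point_wise_f1(y_true, y_pred))

scores = np.array(scores)
# Same min-max normalisation applied to current_result in score_graphs().
scores = (scores - scores.min()) / (scores.max() - scores.min())
print(f"best prediction centre: {int(np.argmax(scores)) + pred_length // 2}")
```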