RRAEsTorch-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,245 @@
+ import numpy as np
+
+
+ class Null_Tracker:
+     """Tracker that performs no adaptation and issues no directives."""
+     def __init__(self, *args, **kwargs):
+         pass
+
+     def __call__(self, current_loss, prev_avg_loss, *args, **kwargs):
+         return {}
+
+     def init(self):
+         return {}
+
+
+ class RRAE_fixed_Tracker:
+     """Default tracker for RRAEs that provides a fixed value of k_max."""
+     def __init__(self, k_init):
+         self.k_now = k_init
+
+     def __call__(self, *args, **kwargs):
+         return {"k_max": self.k_now}
+
+     def init(self):
+         return {"k_max": self.k_now}
+
+
+ class RRAE_gen_Tracker:
+     """Tracker that performs the generic adaptive algorithm.
+
+     The algorithm begins with a large value of k_max, provided as k_init,
+     and trains until convergence, saving the loss reached as the 'optimal
+     loss'. k_max is then decreased one by one, with training continued
+     until the loss converges back to the optimal value each time. Finally,
+     when a value of k_max is so low that the loss can no longer reach the
+     optimal value, k_max is increased by one again and training continues.
+
+     Parameters
+     ----------
+     k_init: start value of k_max (choose it to be big)
+     patience_conv: patience for convergence (assuming the loss stagnated)
+     patience_init: how many forward steps to run as 'initialization',
+                    i.e. before the adaptive algorithm starts
+     patience_not_right: patience before assuming that the value of k_max
+                         is too small
+     perf_loss: the optimal loss, if known a priori
+     eps_0: the error in percent below which we assume stagnation while
+            searching for the right value of k_max
+     eps_perc: the error in percent below which we assume stagnation during
+               the initialization phase and once k_max has converged
+     save_each_k: if set to True, the model is saved when the value of k
+                  changes
+     k_min: the minimum value of k, if known in advance
+     converged_steps: number of steps to run after convergence to the right
+                      k; if this parameter is not set, the stagnation
+                      criterion determines when training is stopped
+     """
+     def __init__(
+         self,
+         k_init,
+         patience_conv=1,
+         patience_init=None,
+         patience_not_right=500,
+         perf_loss=0,
+         eps_0=1,
+         eps_perc=5,
+         save_each_k=False,
+         k_min=0,
+         converged_steps=np.inf,
+     ):
+         self.patience_c_conv = 0
+         self.patience_c = 0
+         self.steps_c = 0
+         self.k_now = k_init
+
+         self.change_prot = False
+         self.loss_prev_mode = np.inf
+         self.wait_counter = 0
+         self.converged = False
+         self.total_steps = 0
+
+         self.patience_conv = patience_conv
+         self.patience = patience_not_right
+         self.patience_init = patience_init
+         self.init_phase = True
+         self.ideal_loss = np.nan
+         self.eps_0 = eps_0
+         self.perf_loss = perf_loss
+         self.eps_perc = eps_perc
+         self.k_steps = 0
+         self.max_patience = np.inf
+         self.save_each_k = save_each_k
+         self.stop_train = False
+         self.k_min = k_min
+         self.converged_steps = converged_steps
+         self.converged_steps_c = 0
+
+     def __call__(self, current_loss, prev_avg_loss, *args, **kwargs):
+
+         current_loss = float(current_loss)
+         prev_avg_loss = float(prev_avg_loss)
+
+         save = False
+         break_ = False
+         if self.init_phase:
+             if self.patience_init is not None:
+                 if (
+                     np.abs(current_loss - prev_avg_loss) / np.abs(prev_avg_loss) * 100
+                     < self.eps_perc
+                 ):
+                     self.patience_c += 1
+                     if self.patience_c == self.patience_init:
+                         self.patience_c = 0
+                         self.init_phase = False
+                         self.ideal_loss = prev_avg_loss
+                         print(f"Ideal loss is {self.ideal_loss}")
+                         print("Stagnated")
+
+             if current_loss < self.perf_loss:
+                 self.ideal_loss = self.perf_loss
+                 self.init_phase = False
+                 self.patience_c = 0
+                 print(f"Ideal loss is {self.ideal_loss}")
+
+             return {"k_max": self.k_now, "save": save, "break_": break_, "stop_train": self.stop_train, "load": False}
+
+         self.total_steps += 1
+
+         if (self.k_now <= self.k_min) and (not self.converged):
+             print("Converged to minimum value")
+             self.converged = True
+
+         load = False
+
+         if not self.converged:
+             if current_loss < self.ideal_loss:
+                 # Below the optimal loss: k_max can be decreased further.
+                 self.patience_c = 0
+                 self.k_steps = 0
+                 self.patience_c_conv += 1
+                 if self.patience_c_conv == self.patience_conv:
+                     self.patience_c_conv = 0
+                     self.k_now -= 1
+                     save = True
+                     self.total_steps = 0
+             else:
+                 self.patience_c_conv = 0
+                 self.k_steps += 1
+                 stg = np.abs(current_loss - prev_avg_loss) / np.abs(prev_avg_loss) * 100 < self.eps_0
+                 worse = current_loss > prev_avg_loss
+                 if stg or worse:
+                     # Stagnated (or got worse) above the optimal loss.
+                     self.k_steps = 0
+                     self.patience_c += 1
+                     if self.patience_c == self.patience:
+                         self.patience_c = 0
+                         self.k_now += 1
+                         save = False
+                         load = True
+                         self.converged = True
+                         break_ = True
+                         print("Reached a k_max that's too low, adding 1 to k_max")
+
+         else:
+             self.converged_steps_c += 1
+
+             if self.converged_steps_c >= self.converged_steps:
+                 self.stop_train = True
+                 save = True
+                 self.patience_c = 0
+             elif (
+                 np.abs(current_loss - prev_avg_loss) / np.abs(prev_avg_loss) * 100
+                 < self.eps_perc
+             ):
+                 self.patience_c += 1
+                 if self.patience_c == self.patience:
+                     self.patience_c = 0
+                     save = True
+                     self.stop_train = True
+                     print("Stopping training")
+
+         return {"k_max": self.k_now, "save": save, "break_": break_, "stop_train": self.stop_train, "load": load}
+
+     def init(self):
+         return {"k_max": self.k_now}
+
+
+ class RRAE_pars_Tracker:
+     """Tracker that performs the parsimonious adaptive algorithm.
+
+     The algorithm begins with the smallest value of k_max (1 if not given),
+     then the value of k_max is increased by 1 every time convergence is
+     reached.
+
+     NOTE: This algorithm is not recommended; it usually leads to worse and
+     less explainable results. It is kept here for anyone who wants to
+     experiment.
+
+     Parameters
+     ----------
+     k_init: start value of k_max (choose it to be small)
+     patience: how many steps to wait before assuming stagnation
+     eps_perc: error in percent under which we assume stagnation
+     """
+     def __init__(
+         self,
+         k_init=None,
+         patience=5000,
+         eps_perc=1,
+     ):
+         k_init = 1 if k_init is None else k_init
+
+         self.patience = patience
+         self.eps = eps_perc
+         self.patience_c = 0
+         self.loss_prev_mode = np.inf
+         self.k_now = k_init
+         self.converged = False
+         self.stop_train = False
+
+     def __call__(self, current_loss, prev_avg_loss, *args, **kwargs):
+         save = False
+         break_ = False
+         if not self.converged:
+             # Percent change of the loss relative to the running average.
+             if np.abs(current_loss - prev_avg_loss) / np.abs(prev_avg_loss) * 100 < self.eps:
+                 self.patience_c += 1
+                 if self.patience_c == self.patience:
+                     self.patience_c = 0
+                     save = True
+
+                     if np.abs(prev_avg_loss - self.loss_prev_mode) / np.abs(self.loss_prev_mode) * 100 < self.eps:
+                         # The loss stagnated at the same plateau as with the
+                         # previous k: adding modes no longer helps, converge.
+                         self.k_now -= 1
+                         break_ = True
+                         self.converged = True
+                         self.patience_c = 0
+                     else:
+                         self.k_now += 1
+                         self.loss_prev_mode = prev_avg_loss
+         else:
+             if np.abs(current_loss - prev_avg_loss) / np.abs(prev_avg_loss) * 100 < self.eps:
+                 self.patience_c += 1
+                 if self.patience_c == self.patience:
+                     self.patience_c = 0
+                     save = True
+                     self.stop_train = True
+                     print("Stopping training")
+
+         return {"k_max": self.k_now, "save": save, "break_": break_, "stop_train": self.stop_train}
+
+     def init(self):
+         return {"k_max": self.k_now}
@@ -0,0 +1,5 @@
+ from .training_classes import (
+     Trainor_class,
+     RRAE_Trainor_class,
+     AE_Trainor_class,
+ )