radnn 0.0.6__py3-none-any.whl → 0.0.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,389 @@
+ import os
+ import numpy as np
+ from datetime import datetime
+ from tensorflow import keras
+ from radnn import MLSystem, FileSystem, FileStore, Errors
+ from radnn.data import DataSetBase
+ from radnn.learn import LearningAlgorithm
+
+ from radnn.plots import PlotLearningCurve
+ from radnn.evaluation import EvaluateClassification
+
+ from .ml_experiment_env import MLExperimentEnv
+ from .ml_experiment_config import MLExperimentConfig, model_code_mllib
+ from .ml_experiment_store import MLExperimentStore
+
+
+ # --------------------------------------------------------------------------------------------------------------------
+ # Define a custom sorting function
+ def _sort_by_last_path_element(folder):
+   # Split the path into its components
+   components = folder.split(os.sep)
+   # Extract the last path element
+   last_element = components[-1]
+   # Extract the numeric part of the last element
+   numeric_part = ''.join(filter(str.isdigit, last_element))
+   # Convert the numeric part to an integer
+   try:
+     return int(numeric_part)
+   except ValueError:
+     # If the numeric part is not convertible to an integer, return a large number
+     return float('inf')
+ # --------------------------------------------------------------------------------------------------------------------
+
+
+
+
+
+
+ # ======================================================================================================================
+ class MLExperiment:
+   # --------------------------------------------------------------------------------------------------------------------
+   def __init__(self, setup, model=None, learning_algorithm=None, cost_function=None, metrics=None, is_retraining=False):
+     # ...........................................................................
+     self.model = model
+     self.learning_algorithm: LearningAlgorithm = learning_algorithm
+     self.cost_function = cost_function
+     self.metrics = metrics if metrics is not None else []
+     self.is_retraining = is_retraining
+     self.is_showing_step_progress = False
+     self.environment = None
+     self.config = None
+     self.model_name = None
+     self.model_fs = None
+
+     self.generator = None
+     self._dataset: DataSetBase = None
+     self._ts_feed = None
+     self._vs_feed = None
+
+     self.process_log = None
+     self.learning_history = None
+     self._evaluation = None
+
+     self._is_graph_built = False
+     self._has_loaded_state = False
+
+     self._start_train_time = None
+     self._end_train_time = None
+
+     self._currentModelFolder = None
+     self._currentModelStateFolder = None
+     self._currentModelLogFileStore = None
+     # ...........................................................................
+
+     if isinstance(setup, MLExperimentEnv):
+       self.environment = setup
+       self.config = self.environment.config
+       self.model_fs = self.environment.model_fs
+     elif isinstance(setup, MLExperimentConfig):
+       self.config = setup
+     elif isinstance(setup, dict):
+       self.config = MLExperimentConfig().assign(setup)
+     elif hasattr(setup, "config"):
+       self.config = setup.config
+     else:
+       raise Exception("Incompatible machine learning experiment setup object")
+
+     self.model_name = model_code_mllib(self.config)
+
+     #if self.environment is None:
+     #  raise Exception("Not supported yet: Creating a machine learning experiment environment from a config or dictionary object")
+
+     if self.model_fs is None:
+       if MLSystem.Instance().filesys is None:
+         raise Exception(Errors.MLSYS_NO_FILESYS)
+       oFileSys: FileSystem = MLSystem.Instance().filesys
+       self.model_fs = MLExperimentStore(oFileSys.models.folder(self.model_name))
+
+     assert isinstance(self.model_fs, MLExperimentStore), f"Unsupported model store object of class {self.model_fs.__class__}"
+   # --------------------------------------------------------------------------------------------------------------------
+   @property
+   def evaluation(self):
+     if self._evaluation is None:
+       raise Exception("Must run evaluation for the metrics object to become available")
+     return self._evaluation
+   # --------------------------------------------------------------------------------------------------------------------
+   @property
+   def model_state_folder(self):
+     return self.model_fs.state_fs.base_folder
+   # --------------------------------------------------------------------------------------------------------------------
+   @property
+   def checkpoint_paths(self):
+     oCheckpointFS = self.model_fs.checkpoint_fs
+     if self._currentModelFolder is not None:
+       if self._currentModelFolder != self.model_fs.base_folder:
+         oCheckpointFS = FileStore(self._currentModelFolder).subfs("checkpoints")
+
+     sCheckPointPaths = oCheckpointFS.Files("*.index", True, p_tSortFilenamesAs=int)
+     sCheckPointPaths = sorted(sCheckPointPaths, key=_sort_by_last_path_element)
+     return sCheckPointPaths
+   # --------------------------------------------------------------------------------------------------------------------
+   @property
+   def is_pretrained(self):
+     return self.model_fs.state_fs.exists("saved_model.pb")
+   # --------------------------------------------------------------------------------------------------------------------
+   @classmethod
+   def restore(cls, model_name, model=None):
+     oConfig = {"ModelName": model_name}
+     oExperiment = MLExperiment(oConfig, model)
+     return oExperiment.load()
+   # --------------------------------------------------------------------------------------------------------------------
+   @property
+   def dataset(self):
+     return self._dataset
+   # ......................................
+   @dataset.setter
+   def dataset(self, value):
+     self._dataset = value
+     if isinstance(self._dataset, DataSetBase):
+       if self._ts_feed is None:
+         self._ts_feed = self._dataset.ts.feed
+       if self._vs_feed is None:
+         self._vs_feed = self._dataset.vs.feed
+   # --------------------------------------------------------------------------------------------------------------------
+   @property
+   def training_set(self):
+     return self._ts_feed
+   # ......................................
+   @training_set.setter
+   def training_set(self, value):
+     self._ts_feed = value
+   # --------------------------------------------------------------------------------------------------------------------
+   @property
+   def validation_set(self):
+     return self._vs_feed
+   # ......................................
+   @validation_set.setter
+   def validation_set(self, value):
+     self._vs_feed = value
+
+   # --------------------------------------------------------------------------------------------------------------------
+   def print_info(self):
+     if not self._is_graph_built:
+       for oSamples, oTarget in self._ts_feed.take(1):
+         # We recall one batch/sample to create the graph and initialize the model parameters
+         y = self.model(oSamples)
+         break
+
+       self._is_graph_built = True
+
+     self.model.summary()
+   # --------------------------------------------------------------------------------------------------------------------
+   def _timing_info(self):
+     # Timings
+     nEpochs = self.config["Training.MaxEpoch"]
+     dDiff = self._end_train_time - self._start_train_time
+     nElapsedHours = dDiff.total_seconds() / 3600
+     nSecondsPerEpoch = dDiff.total_seconds() / nEpochs
+     dTiming = {"StartTime": self._start_train_time, "EndTime": self._end_train_time,
+                "ElapsedHours": nElapsedHours, "SecondsPerEpoch": nSecondsPerEpoch}
+     return dTiming
+   # --------------------------------------------------------------------------------------------------------------------
+   def save(self):
+
+     self.model.save(self.model_fs.state_fs.base_folder)
+     if self.learning_history is not None:
+       self.model_fs.log_fs.obj.save(self.learning_history, "keras_learning_history.pkl")
+     dTiming = self._timing_info()
+     self.model_fs.log_fs.obj.save(dTiming, "timing_info.pkl")
+     dTiming["StartTime"] = dTiming["StartTime"].strftime('%Y-%m-%dT%H:%M:%S')
+     dTiming["EndTime"] = dTiming["EndTime"].strftime('%Y-%m-%dT%H:%M:%S')
+     self.model_fs.log_fs.json.save(dTiming, f"timing_info_{self._end_train_time.strftime('%Y-%m-%dT%H%M%S')}.json",
+                                    is_sorted_keys=False)
+
+     # //TODO: Keep cost function names and other learning parameters for evaluation
+   # --------------------------------------------------------------------------------------------------------------------
+   def load(self, use_last_checkpoint=False, model_root_folder=None):
+     self._currentModelFolder = self.model_fs.base_folder
+     self._currentModelStateFolder = self.model_fs.state_fs.base_folder
+     self._currentModelLogFileStore = self.model_fs.log_fs
+     if model_root_folder is not None:
+       self._currentModelFolder = model_root_folder
+       self._currentModelStateFolder = os.path.join(model_root_folder, "state")
+       self._currentModelLogFileStore = FileStore(model_root_folder).subfs("logs")
+
+     print("Loading saved state from ", self._currentModelStateFolder)
+     self.model = keras.models.load_model(self._currentModelStateFolder)
+     self.learning_history = (self._currentModelLogFileStore.obj.load("keras_learning_history.pkl"))
+     self._has_loaded_state = True
+
+     if use_last_checkpoint:
+       self.load_model_params(use_last_checkpoint=True)
+
+     return self.model
+   # --------------------------------------------------------------------------------------------------------------------
+   def unload_model(self):
+     del self.model
+     self.model = None
+   # --------------------------------------------------------------------------------------------------------------------
+   def save_model_params(self):
+     if self.model is not None:
+       self.model.save_weights(self.model_fs.param_fs.folder("model_params"))
+   # --------------------------------------------------------------------------------------------------------------------
+   def load_model_params(self, checkpoint_path=None, use_last_checkpoint=False, is_ignoring_not_found=False):
+     sTargetPath = None
+     sCheckPointPaths = self.checkpoint_paths
+     if checkpoint_path is not None:
+       if checkpoint_path in sCheckPointPaths:
+         sTargetPath = checkpoint_path
+       else:
+         raise Exception(f"Model params not found in checkpoint path {checkpoint_path}")
+     elif use_last_checkpoint:
+       if len(sCheckPointPaths) > 0:
+         sTargetPath = sCheckPointPaths[-1]
+       else:
+         raise Exception(f"No checkpoints are saved in {self.model_fs.checkpoint_fs.base_folder}")
+     else:
+       if self.model_fs.param_fs.is_empty:
+         raise Exception(f"Model params not found in {self.model_fs.param_fs.base_folder}")
+       else:
+         sTargetPath = self.model_fs.param_fs.subpath("model_params")
+
+     if sTargetPath is not None:
+       if is_ignoring_not_found:
+         self.model.load_weights(sTargetPath)
+         '''
+         nLogLevel = tf.get_logger().getEffectiveLevel()
+         try:
+           tf.get_logger().setLevel(logging.CRITICAL) #This does not suppress warning during loading of models
+
+         finally:
+           tf.get_logger().setLevel(nLogLevel)
+         '''
+       else:
+         self.model.load_weights(sTargetPath)
+         print("Loaded weights from %s" % sTargetPath)
+       self._has_loaded_state = True
+   # --------------------------------------------------------------------------------------------------------------------
+   def transfer_model_params_to(self, new_model, input_shape, metrics=None):
+     self.save_model_params()
+     del self.model
+     self.model = new_model
+     self.model.build(input_shape=input_shape)
+     self.load_model_params()
+     self.model.compile(metrics=metrics or [])  # , run_eagerly = True)
+     return self.model
+   # --------------------------------------------------------------------------------------------------------------------
+   def train(self):
+     # //TODO: Decouple from Keras training
+
+     if (not self.is_pretrained) or self.is_retraining:
+       oOptimizer = self.learning_algorithm.optimizer
+       self._start_train_time = datetime.now()
+       self.model.compile(loss=self.cost_function, optimizer=oOptimizer, metrics=self.metrics)
+       if MLSystem.Instance().switches["IsDebuggable"]:
+         self.model.run_eagerly = True
+
+       if self.is_showing_step_progress:
+         nVerbose = 1
+       else:
+         nVerbose = 2
+
+       nEpochs = self.config["Training.MaxEpoch"]
+
+       if self.generator is not None:
+         self.process_log = self.model.fit_generator(generator=self.generator,
+                                                     epochs=nEpochs,
+                                                     callbacks=self.learning_algorithm.callbacks,
+                                                     validation_data=self._vs_feed,
+                                                     verbose=nVerbose)
+       else:
+         if "Training.StepsPerEpoch" in self.config:
+           self.process_log = self.model.fit(self._ts_feed
+                                             , batch_size=self.config["Training.BatchSize"]
+                                             , epochs=nEpochs
+                                             , validation_data=self._vs_feed
+                                             , callbacks=self.learning_algorithm.callbacks
+                                             , steps_per_epoch=self.config["Training.StepsPerEpoch"]
+                                             , verbose=nVerbose)
+         else:
+           self.process_log = self.model.fit(self._ts_feed
+                                             , batch_size=self.config["Training.BatchSize"]
+                                             , epochs=nEpochs
+                                             , validation_data=self._vs_feed
+                                             , callbacks=self.learning_algorithm.callbacks
+                                             , verbose=nVerbose)
+       self._end_train_time = datetime.now()
+       self.learning_history = self.process_log.history
+       self.save()
+       self._is_graph_built = True
+     else:
+       self.load()
+
+     return self.model
+   # --------------------------------------------------------------------------------------------------------------------
+
+
+   # // Evaluation \\
+   # --------------------------------------------------------------------------------------------------------------------
+   def plot_learning_curve(self):
+     oTrainingLogPlot = PlotLearningCurve(self.learning_history, self.model_name)
+     oTrainingLogPlot.prepare(metric_key=self.metrics[0]).show()
+     oTrainingLogPlot.prepare_cost(self.cost_function).show()
+   # --------------------------------------------------------------------------------------------------------------------
+   def _evaluation_metrics(self, true_class_labels, predicted_class_labels):
+     true_class_labels = true_class_labels.reshape(-1)
+     predicted_class_labels = predicted_class_labels.reshape(-1)
+     self._evaluation = EvaluateClassification(true_class_labels, predicted_class_labels)
+
+     return self._evaluation
+   # --------------------------------------------------------------------------------------------------------------------
+   def _evaluation_report(self, true_class_labels, predicted_class_labels, p_nID=None):
+     true_class_labels = true_class_labels.reshape(-1)
+     predicted_class_labels = predicted_class_labels.reshape(-1)
+
+     self._evaluation = EvaluateClassification(true_class_labels, predicted_class_labels)
+     self._evaluation.print_confusion_matrix()
+     self._evaluation.print_overall()
+     self._evaluation.print_per_class()
+
+     if p_nID is not None:
+       bMissclassifiedFlags = (true_class_labels != predicted_class_labels)
+       print(f"Missclassified Samples: {np.sum(bMissclassifiedFlags)}/{true_class_labels.shape[0]}")
+
+       nMissTrue = true_class_labels[bMissclassifiedFlags]
+       nMissPredicted = predicted_class_labels[bMissclassifiedFlags]
+       nMissIDs = p_nID[bMissclassifiedFlags]
+       for i, nID in enumerate(nMissIDs):
+         print(f" |__ Sample#{int(nID):07d} True:{int(nMissTrue[i])} != {int(nMissPredicted[i])}")
+
+     return self._evaluation
+   # --------------------------------------------------------------------------------------------------------------------
+   def evaluate_classifier(self, true_class_labels=None, sample_feed=None, is_evaluating_using_keras=False, is_printing=True):
+     if sample_feed is None:
+       sample_feed = self._vs_feed
+
+     nLoss, nAccuracy = (None, None)
+     if is_evaluating_using_keras:
+       oTestResults = self.model.evaluate(sample_feed, verbose=1)
+       nLoss, nAccuracy = oTestResults
+       print("Evaluation: Loss:%.6f - Accuracy:%.6f" % (nLoss, nAccuracy))
+
+     # ... // Evaluate \\ ...
+     nVerbose = 0
+     if is_printing:
+       nVerbose = 1
+
+     nPrediction = self.model.predict(sample_feed, verbose=nVerbose)
+     if isinstance(nPrediction, tuple):
+       nPredictedClassProbabilities, nPredictedClassLabels, nIDs = nPrediction
+     else:
+       nPredictedClassProbabilities = nPrediction
+       nPredictedClassLabels = np.argmax(nPredictedClassProbabilities, axis=1)
+       nIDs = None
+
+     if true_class_labels is None:
+       true_class_labels = self._dataset.vs_labels
+
+     if is_printing:
+       return self._evaluation_report(true_class_labels, nPredictedClassLabels, nIDs), nLoss, nAccuracy
+     else:
+       return self._evaluation_metrics(true_class_labels, nPredictedClassLabels), nLoss, nAccuracy
+   # --------------------------------------------------------------------------------------------------------------------
+
+
+
+
+
+
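
A minimal usage sketch of the new MLExperiment class follows. The configuration values, model and dataset objects are hypothetical; only keys actually read by the class are shown, and it assumes MLSystem.Instance().filesys has already been configured.

# Illustrative sketch (not from the package): train and evaluate a classification experiment.
# "model" is assumed to be a keras.Model and "dataset" a radnn DataSetBase with ts/vs feeds and vs_labels.
config = {"ModelName": "MyClassifier",
          "Training.Optimizer": "ADAM",
          "Training.LearningRate": 1e-3,
          "Training.MaxEpoch": 20,
          "Training.BatchSize": 128}

experiment = MLExperiment(config, model=model,
                          learning_algorithm=LearningAlgorithm(config),
                          cost_function="categorical_crossentropy",
                          metrics=["accuracy"])
experiment.dataset = dataset
experiment.train()                # fits and saves, or reloads the saved state if already pretrained
experiment.evaluate_classifier()  # prints confusion matrix, overall and per-class metrics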
@@ -30,6 +30,35 @@ from datetime import datetime

  from radnn.system import FileSystem

+ # --------------------------------------------------------------------------------------
+ def model_code_mllib(p_oDict):
+   if "Experiment.BaseName" in p_oDict:
+     sBaseName = p_oDict["Experiment.BaseName"]
+     nNumber = int(p_oDict["Experiment.Number"])
+     sVariation = None
+     if "Experiment.Variation" in p_oDict:
+       sVariation = p_oDict["Experiment.Variation"]
+     nFoldNumber = None
+     if "Experiment.FoldNumber" in p_oDict:
+       nFoldNumber = p_oDict["Experiment.FoldNumber"]
+
+     sCode = "%s_%02d" % (sBaseName, nNumber)
+     if sVariation is not None:
+       sCode += ".%s" % str(sVariation)
+     if nFoldNumber is not None:
+       sCode += "-%02d" % int(nFoldNumber)
+
+   elif "ModelName" in p_oDict:
+     sCode = p_oDict["ModelName"]
+     if "ModelVariation" in p_oDict:
+       sCode += "_" + p_oDict["ModelVariation"]
+     if "ExperimentNumber" in p_oDict:
+       sCode = sCode + "_%02d" % p_oDict["ExperimentNumber"]
+
+   return sCode
+ # --------------------------------------------------------------------------------------
+
+

  # --------------------------------------------------------------------------------------
  def legacy_model_code(config_dict):
    if "ModelName" in config_dict:
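
A quick sketch of the naming scheme implemented by model_code_mllib, using made-up configuration values (the import path of the function is not shown in this hunk):

# Hypothetical values; the outputs follow the formatting rules above.
model_code_mllib({"Experiment.BaseName": "CIFAR10_CNN", "Experiment.Number": 3,
                  "Experiment.Variation": "A", "Experiment.FoldNumber": 2})
# -> "CIFAR10_CNN_03.A-02"

model_code_mllib({"ModelName": "resnet", "ModelVariation": "small", "ExperimentNumber": 1})
# -> "resnet_small_01"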
@@ -163,19 +192,18 @@ class MLExperimentConfig(dict):
        self["Experiment.BaseName"] += "_" + config_dict["DatasetName"]
    return self
    # --------------------------------------------------------------------------------------
-   def save(self, filename):
+   def save_to_json(self, filename=None):
      if filename is not None:
        self.filename = filename

-     sJSON = json.dumps(self, default=lambda o: o.__dict__, sort_keys=False, indent=4)
+     sJSON = json.dumps(self, sort_keys=False, indent=4)
      with open(self.filename, "w") as oFile:
        oFile.write(sJSON)
        oFile.close()

      return self
-
    # --------------------------------------------------------------------------------------
-   def save_config(self, fs, filename_only=None):
+   def save(self, fs, filename_only=None):
      if isinstance(fs, FileSystem):
        fs = fs.configs

@@ -183,8 +211,11 @@ class MLExperimentConfig(dict):
      filename_only = get_experiment_code(self)

    sFileName = fs.file(filename_only + ".json")
-     return self.save(sFileName)
-
+     return self.save_to_json(sFileName)
+   # --------------------------------------------------------------------------------------
+   def save_config(self, fs, filename_only):
+     # Backwards compatibility with radnn 0.0.6
+     return self.save(fs, filename_only)
    # --------------------------------------------------------------------------------------
    def load_config(self, fs, filename_only):
      if isinstance(fs, FileSystem):
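
With the rename, callers move from save_config to save; a short sketch of the new calls, where the filesys object is a hypothetical FileSystem instance:

config.save(filesys)                         # writes <configs>/<experiment code>.json
config.save_to_json("/tmp/experiment.json")  # writes directly to an explicit path
config.save_config(filesys, "name")          # kept only for 0.0.6 backwards compatibility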
@@ -36,6 +36,11 @@ from radnn.system.tee_logger import TeeLogger



+
+
+
+
+
  class MLExperimentEnv(dict):

    # --------------------------------------------------------------------------------------------------------------------
@@ -98,7 +103,7 @@ class MLExperimentEnv(dict):
        sExperimentFileName = experiment_filename
      else:
        sExperimentFileName = oConfigFS.file(f"{experiment_base_name}_{experiment_variation}.json")
-     oConfig = MLExperimentConfig(sExperimentFileName,p_nExperimentNumber=experiment_variation)
+     oConfig = MLExperimentConfig(filename=sExperimentFileName, variation=experiment_variation)
      oPrintOutput.append(f" |__ {'configuration file':<24}: {sExperimentFileName}")

      return oConfig, oPrintOutput, dExperimentSpec
@@ -108,6 +113,7 @@ class MLExperimentEnv(dict):
    def __init__(self, config_fs, model_fs=None, base_name=None, number=None, variation=None, fold_number=None,
                 experiment_filename=None, experiment_code=None, experiment_config=None, model_filestore=None):

+     super(MLExperimentEnv, self).__init__()
      if isinstance(config_fs, FileSystem):
        oConfigFS = config_fs.configs
        oModelFS = config_fs.models
@@ -0,0 +1,10 @@
+ from radnn import FileStore
+
+ class MLExperimentStore(FileStore):
+   def __init__(self, base_folder, is_verbose=False, must_exist=False):
+     super(MLExperimentStore, self).__init__(base_folder, is_verbose, must_exist)
+
+     self.param_fs = self.subfs("weights")
+     self.log_fs = self.subfs("logs")
+     self.checkpoint_fs = self.subfs("checkpoints")
+     self.state_fs = self.subfs("state")
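
MLExperimentStore pins the per-experiment folder layout; a sketch with a hypothetical base folder:

store = MLExperimentStore("/data/models/MyClassifier_01")  # hypothetical path
# store.param_fs      -> .../MyClassifier_01/weights
# store.log_fs        -> .../MyClassifier_01/logs
# store.checkpoint_fs -> .../MyClassifier_01/checkpoints
# store.state_fs      -> .../MyClassifier_01/state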
@@ -0,0 +1,7 @@
+ from radnn import mlsys
+
+ if mlsys.is_tensorflow_installed:
+   from .keras_optimization_algorithm import KOptimizationAlgorithm
+   from .keras_learning_rate_scheduler import KLearningRateScheduler
+
+ from .learning_algorithm import LearningAlgorithm
@@ -0,0 +1,31 @@
+ import tensorflow.keras as ker
+
+ class KLearningRateScheduler(ker.callbacks.LearningRateScheduler):
+   # -----------------------------------------------------------------------------------
+   def __init__(self, config, is_verbose=True):
+     self.config = None
+     self.lr_schedule = None
+     self.is_verbose = is_verbose
+
+     if isinstance(config, dict):
+       self.config = config
+       self.lr_schedule = config["Training.LearningRateSchedule"]
+     elif isinstance(config, list):
+       self.lr_schedule = config
+
+     super(KLearningRateScheduler, self).__init__(self.check_schedule)
+   # -----------------------------------------------------------------------------------
+   def check_schedule(self, epoch, lr):
+     nNewLR = lr
+
+     for nIndex, oSchedule in enumerate(self.lr_schedule):
+       if epoch == oSchedule[0]:
+         nNewLR = oSchedule[1]
+         if self.is_verbose:
+           print("Schedule #%d: Setting LR to %.5f" % (nIndex + 1, nNewLR))
+         break
+
+     if self.is_verbose:
+       print("LR: %.6f" % nNewLR)
+     return nNewLR
+   # -----------------------------------------------------------------------------------
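
check_schedule expects the schedule as (epoch, learning-rate) pairs; a sketch with made-up values:

# Each entry is [epoch, learning_rate]; the rate switches when fit() reaches that epoch.
schedule = [[0, 1e-2], [30, 1e-3], [60, 1e-4]]
scheduler = KLearningRateScheduler(schedule)
# or, driven by a config dictionary, matching the key used above:
# scheduler = KLearningRateScheduler({"Training.LearningRateSchedule": schedule})
# model.fit(..., callbacks=[scheduler])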
@@ -0,0 +1,32 @@
+ import tensorflow.keras as ker
+ from .keras_learning_rate_scheduler import KLearningRateScheduler
+
+ class KOptimizationAlgorithm(object):
+   # -----------------------------------------------------------------------------------
+   def __init__(self, config, is_verbose=True):
+     self.config = config
+     self.optimizer = None
+     self.callbacks = []
+     self.is_verbose = is_verbose
+
+     self.optimizer_name = self.config["Training.Optimizer"].upper()
+     if self.optimizer_name == "SGD":
+       self.optimizer = ker.optimizers.SGD(learning_rate=self.config["Training.LearningRate"],
+                                           momentum=self.config["Training.Momentum"])
+       if "Training.LearningRateSchedule" in self.config:
+         oLearningRateSchedule = KLearningRateScheduler(self.config)
+         self.callbacks.append(oLearningRateSchedule)
+     elif self.optimizer_name == "ADAM":
+       self.optimizer = ker.optimizers.Adam(learning_rate=self.config["Training.LearningRate"])
+     elif self.optimizer_name == "RMSPROP":
+       # //TODO: Rho
+       if "Training.Momentum" in self.config:
+         self.optimizer = ker.optimizers.RMSprop(learning_rate=self.config["Training.LearningRate"],
+                                                 momentum=self.config["Training.Momentum"])
+       else:
+         self.optimizer = ker.optimizers.RMSprop(learning_rate=self.config["Training.LearningRate"])
+
+     assert self.optimizer is not None, f'Unsupported optimizer {self.config["Training.Optimizer"]}'
+     if self.is_verbose:
+       print(f"Learning algorithm {self.optimizer_name}")
+   # -----------------------------------------------------------------------------------
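
The configuration keys consumed by KOptimizationAlgorithm, shown with illustrative values:

config = {"Training.Optimizer": "SGD",                    # "SGD", "ADAM" or "RMSPROP"
          "Training.LearningRate": 0.01,
          "Training.Momentum": 0.9,                       # used by the SGD and RMSprop branches
          "Training.LearningRateSchedule": [[30, 1e-3]]}  # optional; attached as a callback for SGD
algorithm = KOptimizationAlgorithm(config)
# algorithm.optimizer -> a keras optimizer instance
# algorithm.callbacks -> [KLearningRateScheduler(...)] when a schedule is configured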
@@ -0,0 +1,35 @@
+ from radnn.core import is_tensorflow_installed
+
+ if is_tensorflow_installed:
+   from .keras_optimization_algorithm import KOptimizationAlgorithm
+
+ class LearningAlgorithm(object):
+   # -----------------------------------------------------------------------------------
+   def __init__(self, config, is_verbose=True):
+     self.config = config
+     self.is_verbose = is_verbose
+     self._implementation = None
+
+     self.prepare()
+   # -----------------------------------------------------------------------------------
+   @property
+   def optimizer(self):
+     oResult = None
+     if self._implementation is not None:
+       if isinstance(self._implementation, KOptimizationAlgorithm):
+         oResult = self._implementation.optimizer
+     return oResult
+   # -----------------------------------------------------------------------------------
+   @property
+   def callbacks(self):
+     oResult = None
+     if self._implementation is not None:
+       if isinstance(self._implementation, KOptimizationAlgorithm):
+         oResult = self._implementation.callbacks
+     return oResult
+   # -----------------------------------------------------------------------------------
+   def prepare(self):
+     if is_tensorflow_installed:
+       self._implementation = KOptimizationAlgorithm(self.config, self.is_verbose)
+     return self
+   # -----------------------------------------------------------------------------------
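
LearningAlgorithm is a thin facade over the Keras implementation; a hedged sketch of how experiment code can consume it (model and feeds are hypothetical):

algorithm = LearningAlgorithm({"Training.Optimizer": "ADAM", "Training.LearningRate": 1e-3})
model.compile(loss="categorical_crossentropy", optimizer=algorithm.optimizer, metrics=["accuracy"])
model.fit(train_feed, epochs=10, validation_data=val_feed, callbacks=algorithm.callbacks)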
@@ -0,0 +1,4 @@
+ from radnn import mlsys
+
+ if mlsys.is_tensorflow_installed:
+   from .keras_best_state_saver import KBestStateSaver
@@ -0,0 +1,17 @@
+ import os
+ import tensorflow.keras as ker
+
+ class KBestStateSaver(object):
+   # -----------------------------------------------------------------------------------
+   def __init__(self, experiment_fs, metric, verbose=2):
+     self.experiment_fs = experiment_fs
+     self.metric = metric
+     self.checkpoint_fs = self.experiment_fs.subfs("checkpoints")
+
+     sCheckPointPathTemplate = os.path.join(self.checkpoint_fs.base_folder, "{epoch}")
+
+     self.Callback = ker.callbacks.ModelCheckpoint(filepath=sCheckPointPathTemplate,
+                                                   verbose=verbose, save_weights_only=True,
+                                                   monitor="val_" + self.metric,
+                                                   mode="max", save_best_only=True)
+   # -----------------------------------------------------------------------------------
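
KBestStateSaver wraps a weights-only ModelCheckpoint that keeps the best epoch according to a validation metric; a sketch assuming the hypothetical store object from the MLExperimentStore example above:

saver = KBestStateSaver(experiment_fs=store, metric="accuracy")  # monitors "val_accuracy"
# model.fit(..., callbacks=[saver.Callback])  # writes <store>/checkpoints/<epoch> on improvement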