radnn 0.0.6-py3-none-any.whl → 0.0.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- radnn/__init__.py +6 -3
- radnn/core.py +9 -0
- radnn/data/__init__.py +4 -0
- radnn/data/dataset_base.py +327 -0
- radnn/data/image_dataset.py +100 -0
- radnn/data/preprocess/__init__.py +2 -0
- radnn/data/preprocess/normalizer.py +111 -0
- radnn/data/preprocess/standardizer.py +100 -0
- radnn/data/sample_set.py +153 -0
- radnn/data/sequence_dataset.py +136 -0
- radnn/errors.py +2 -0
- radnn/evaluation/evaluate_classification.py +2 -2
- radnn/experiment/ml_experiment.py +389 -0
- radnn/experiment/ml_experiment_config.py +37 -6
- radnn/experiment/ml_experiment_env.py +7 -1
- radnn/experiment/ml_experiment_store.py +10 -0
- radnn/learn/__init__.py +7 -0
- radnn/learn/keras_learning_rate_scheduler.py +31 -0
- radnn/learn/keras_optimization_algorithm.py +32 -0
- radnn/learn/learning_algorithm.py +35 -0
- radnn/learn/state/__init__.py +4 -0
- radnn/learn/state/keras_best_state_saver.py +17 -0
- radnn/ml_system.py +96 -0
- radnn/plots/__init__.py +2 -1
- radnn/plots/plot_auto_multi_image.py +112 -0
- radnn/system/files/jsonfile.py +4 -1
- radnn/system/filesystem.py +2 -6
- radnn/utils.py +89 -0
- {radnn-0.0.6.dist-info → radnn-0.0.7.dist-info}/METADATA +2 -2
- radnn-0.0.7.dist-info/RECORD +53 -0
- radnn-0.0.6.dist-info/RECORD +0 -33
- {radnn-0.0.6.dist-info → radnn-0.0.7.dist-info}/LICENSE.txt +0 -0
- {radnn-0.0.6.dist-info → radnn-0.0.7.dist-info}/WHEEL +0 -0
- {radnn-0.0.6.dist-info → radnn-0.0.7.dist-info}/top_level.txt +0 -0

radnn/data/preprocess/standardizer.py
ADDED

@@ -0,0 +1,100 @@
+# ......................................................................................
+# MIT License
+
+# Copyright (c) 2023-2025 Pantelis I. Kaplanoglou
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# ......................................................................................
+
+import numpy as np
+
+'''
+Standardization for rank 3 and above tensors using numpy
+'''
+class Standardizer(object):
+  # --------------------------------------------------------------------------------------------------------------------
+  def __init__(self, name=None, filestore=None):
+    # ................................................................
+    # // Fields \\
+    self.mean = None
+    self.std = None
+    self.name = name
+    self.filestore = filestore
+    # ................................................................
+    self.load()
+  # --------------------------------------------------------------------------------------------------------------------
+  def load(self):
+    if (self.name is not None) and (self.filestore is not None):
+      dStats = self.filestore.obj.load("%s-meanstd.pkl" % self.name)
+      if dStats is not None:
+        self.mean = dStats["mean"]
+        self.std = dStats["std"]
+  # --------------------------------------------------------------------------------------------------------------------
+  def save(self):
+    if (self.name is not None) and (self.filestore is not None):
+      dStats = {"mean": self.mean, "std": self.std}
+      self.filestore.obj.save(dStats, "%s-meanstd.pkl" % self.name, is_overwriting=True)
+  # --------------------------------------------------------------------------------------------------------------------
+  def fit(self, data, axis_for_stats=-1, is_recalculating=False, is_verbose=False):
+    bIsCached = False
+    if (self.name is not None) and (self.filestore is not None):
+      if self.mean is not None:
+        bIsCached = True
+
+    if (not bIsCached) or is_recalculating:
+      # Collect statistics with maximum precision
+      data = data.astype(np.float64)
+      nAxes = list(range(len(data.shape)))
+      if axis_for_stats is None:
+        nAxes = tuple(nAxes)
+      else:
+        if axis_for_stats == -1:
+          axis_for_stats = nAxes[-1]
+
+        nAxes.remove(axis_for_stats)
+        if len(nAxes) == 1:
+          nAxes = nAxes[0]
+        else:
+          nAxes = tuple(nAxes)
+
+      self.mean = np.mean(data, axis=nAxes)
+      self.std = np.std(data, axis=nAxes)
+      if is_verbose:
+        print(" Standardization: mean/std shape:%s" % str(self.mean.shape))
+      self.save()
+  # --------------------------------------------------------------------------------------------------------------------
+  def fit_transform(self, data, axis_for_stats=-1, is_recalculating=False, is_verbose=False):
+    self.fit(data, axis_for_stats, is_recalculating, is_verbose)
+    return self.transform(data)
+  # --------------------------------------------------------------------------------------------------------------------
+  def standardize(self, data):
+    return (data - self.mean) / self.std
+  # --------------------------------------------------------------------------------------------------------------------
+  def destandardize(self, data):
+    return (data * self.std) + self.mean
+  # --------------------------------------------------------------------------------------------------------------------
+  def transform(self, data):
+    nStandardizedData = (data - self.mean) / self.std
+    return nStandardizedData.astype(data.dtype)
+  # --------------------------------------------------------------------------------------------------------------------
+  def inverse_transform(self, data):
+    nNonStandardizedData = (data * self.std) + self.mean
+    return nNonStandardizedData.astype(data.dtype)
+  # --------------------------------------------------------------------------------------------------------------------
radnn/data/sample_set.py
ADDED

@@ -0,0 +1,153 @@
+from .dataset_base import DataSetBase
+
+
+
+
+class SampleSet(object):
+  # --------------------------------------------------------------------------------------------------------------------
+  def __init__(self, subset_type="custom", has_ids=False):
+    self.subset_type = subset_type
+    self.parent_dataset = None
+
+    self.has_ids = has_ids
+
+    self.ids = None
+    self.samples = None
+    self.sample_count = None
+    self.labels = None
+
+    self._step = 1
+    self._iter_start_pos = 0
+    self._iter_counter = 0
+
+    self.feed = None
+  # --------------------------------------------------------------------------------------------------------------------
+  @property
+  def is_training_set(self):
+    return (self.subset_type == "training") or (self.subset_type == "train") or (self.subset_type == "ts")
+  # --------------------------------------------------------------------------------------------------------------------
+  @property
+  def is_validation_set(self):
+    return (self.subset_type == "validation") or (self.subset_type == "val") or (self.subset_type == "vs")
+  # --------------------------------------------------------------------------------------------------------------------
+  @property
+  def is_unknown_test_set(self):
+    return (self.subset_type == "testing") or (self.subset_type == "test") or (self.subset_type == "ut")
+  # --------------------------------------------------------------------------------------------------------------------
+  @property
+  def has_labels(self):
+    return self.labels is not None
+  # --------------------------------------------------------------------------------------------------------------------
+  @property
+  def data_tuple(self):
+    if self.has_ids:
+      if self.labels is None:
+        return (self.ids, self.samples)
+      else:
+        return (self.ids, self.samples, self.labels)
+    else:
+      if self.labels is None:
+        return self.samples
+      else:
+        return (self.samples, self.labels)
+  # --------------------------------------------------------------------------------------------------------------------
+  def subset_of(self, parent_dataset: DataSetBase):
+    self.parent_dataset = parent_dataset
+    if self.parent_dataset is not None:
+      if self.is_training_set:
+        if self.parent_dataset.ts_samples is not None:
+          self.parent_dataset.ts = self
+          self.ids = self.parent_dataset.ts_sample_ids
+          self.samples = self.parent_dataset.ts_samples
+          self.sample_count = self.parent_dataset.ts_sample_count
+          self.labels = self.parent_dataset.ts_labels
+      elif self.is_validation_set:
+        if self.parent_dataset.vs_samples is not None:
+          self.parent_dataset.vs = self
+          self.ids = self.parent_dataset.vs_sample_ids
+          self.samples = self.parent_dataset.vs_samples
+          self.sample_count = self.parent_dataset.vs_sample_count
+          self.labels = self.parent_dataset.vs_labels
+      elif self.is_unknown_test_set:
+        if self.parent_dataset.ut_samples is not None:
+          self.parent_dataset.ut = self
+          self.ids = self.parent_dataset.ut_sample_ids
+          self.samples = self.parent_dataset.ut_samples
+          self.sample_count = self.parent_dataset.ut_sample_count
+          self.labels = self.parent_dataset.ut_labels
+
+      self.has_ids = self.ids is not None
+  # --------------------------------------------------------------------------------------------------------------------
+  '''
+  def create_feed(self, has_ids=False):
+    self.has_ids = has_ids
+    if is_tensorflow_installed:
+      import tensorflow as tf
+
+      if has_ids:
+        self.feed = tf.data.Dataset.from_tensor_slices((self.ids, self.samples, self.labels))
+      else:
+        self.feed = tf.data.Dataset.from_tensor_slices((self.samples, self.labels))
+
+      self.feed = self.feed.map(preprocess_tf, num_parallel_calls=8)
+
+      if (self.subset_type == "training") or (self.subset_type == "train") or (self.subset_type == "ts"):
+  # -----------------------------------------------------------------------------------
+  def preprocess_tf(self, sample_pack):
+
+    import tensorflow as tf
+
+    if self.has_ids:
+      nId, nSample, nLabel = sample_pack
+    else:
+      nSample, nLabel = sample_pack
+
+    tImage = tf.cast(p_tImageInVS, tf.float32)  # //[BF] overflow of standardization
+    tNormalizedImage = self.normalizeImage(tImage)
+
+    tTargetOneHot = tf.one_hot(p_tLabelInVS, self.ClassCount)
+
+    return tNormalizedImage, tTargetOneHot
+  '''
+
+  # --------------------------------------------------------------------------------------------------------------------
+  def __iter__(self):
+    self._iter_counter = 0
+    if self.ids is not None:
+      if self.labels is not None:
+        yield from self._generator_for_supervised_with_ids()
+      else:
+        yield from self._generator_for_unsupervised_with_ids()
+    else:
+      if self.labels is not None:
+        yield from self._generator_for_supervised()
+      else:
+        yield from self._generator_for_unsupervised()
+  # --------------------------------------------------------------------------------------------------------------------
+  def _generator_for_supervised(self):
+    nIndex = self._iter_start_pos
+    while nIndex < self.sample_count:
+      yield (self.samples[nIndex, ...], self.labels[nIndex, ...])
+      nIndex += self._step
+  # --------------------------------------------------------------------------------------------------------------------
+  def _generator_for_unsupervised(self):
+    nIndex = self._iter_start_pos
+    while nIndex < self.sample_count:
+      yield self.samples[nIndex, ...]
+      nIndex += self._step
+  # --------------------------------------------------------------------------------------------------------------------
+  def _generator_for_supervised_with_ids(self):
+    nIndex = self._iter_start_pos
+    while nIndex < self.sample_count:
+      yield (self.ids[nIndex], self.samples[nIndex, ...], self.labels[nIndex, ...])
+      nIndex += self._step
+  # --------------------------------------------------------------------------------------------------------------------
+  def _generator_for_unsupervised_with_ids(self):
+    nIndex = self._iter_start_pos
+    while nIndex < self.sample_count:
+      yield (self.ids[nIndex], self.samples[nIndex, ...])
+      nIndex += self._step
+  # --------------------------------------------------------------------------------------------------------------------
+
+
+
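
SampleSet wraps a single subset of a dataset (training, validation or unknown test), either bound to a parent DataSetBase through subset_of() or populated directly, and iterating it yields plain samples, (sample, label) pairs or (id, sample, label) triples depending on which arrays are present. A small sketch of standalone use with made-up arrays, assuming SampleSet is re-exported by the new radnn.data package:

import numpy as np
from radnn.data import SampleSet  # assumed export; defined in radnn/data/sample_set.py above

subset = SampleSet(subset_type="train")
subset.samples = np.arange(12, dtype=np.float32).reshape(4, 3)  # 4 samples with 3 features each
subset.labels = np.array([0, 1, 0, 1])
subset.sample_count = subset.samples.shape[0]

print(subset.is_training_set, subset.has_labels)  # True True
for sample, label in subset:  # no ids and labels present, so the supervised generator is used
  print(sample, label)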
radnn/data/sequence_dataset.py
ADDED

@@ -0,0 +1,136 @@
+# ......................................................................................
+# MIT License
+
+# Copyright (c) 2022-2025 Pantelis I. Kaplanoglou
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# ......................................................................................
+
+
+import numpy as np
+from .dataset_base import DataSetBase
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+def generate_sequence_clips(samples, labels, window_size, stride, is_padding_zeros=False):
+  nSequenceIndex = 0
+  while nSequenceIndex < samples.shape[0]:
+    nLabel = labels[nSequenceIndex]
+    nPosition = 0
+    nSpanPoints = window_size
+    if is_padding_zeros:
+      nSpanPoints = window_size - 3 * stride
+
+    nDataPointCount = samples.shape[1]
+    while (nPosition + nSpanPoints) <= nDataPointCount:
+      if is_padding_zeros and ((nPosition + window_size) >= nDataPointCount):
+        nSeqSample = np.zeros((window_size, samples.shape[2]), np.float32)
+        nSeqSample[nPosition + window_size - nDataPointCount:, :] = samples[nSequenceIndex, nPosition:, :]
+      else:
+        nSeqSample = samples[nSequenceIndex, nPosition:nPosition + window_size, :]
+
+      yield (nSeqSample, nLabel)
+
+      nPosition += stride
+    nSequenceIndex += 1
+# ----------------------------------------------------------------------------------------------------------------------
+
+
+
+
+
+class SequenceDataset(DataSetBase):
+  # --------------------------------------------------------------------------------------------------------------------
+  def __init__(self, name, fs, clip_window_size=None, clip_stride=None, is_padding_zeros=False, random_seed=None, is_classification=True):
+    super(SequenceDataset, self).__init__(name, fs, random_seed, is_classification)
+    self.clip_window_size = clip_window_size
+    self.clip_stride = clip_stride
+    self.is_padding_zeros = is_padding_zeros
+    self.card["clips.window_size"] = self.clip_window_size
+    self.card["clips.stride"] = self.clip_stride
+    self.card["clips.is_padding_zeros"] = self.is_padding_zeros
+  # --------------------------------------------------------------------------------------------------------------------
+  @property
+  def ts_sequence_clips(self):
+    return generate_sequence_clips(self.ts_samples, self.ts_labels, self.clip_window_size, self.clip_stride,
+                                   self.is_padding_zeros)
+  # --------------------------------------------------------------------------------------------------------------------
+  @property
+  def vs_sequence_clips(self):
+    if self.vs_samples is not None:
+      return generate_sequence_clips(self.vs_samples, self.vs_labels, self.clip_window_size, self.clip_stride,
+                                     self.is_padding_zeros)
+    else:
+      return None
+  # --------------------------------------------------------------------------------------------------------------------
+  @property
+  def ut_sequence_clips(self):
+    if self.ut_samples is not None:
+      return generate_sequence_clips(self.ut_samples, self.ut_labels, self.clip_window_size, self.clip_stride,
+                                     self.is_padding_zeros)
+    else:
+      return None
+  # --------------------------------------------------------------------------------------------------------------------
+  def convert_samples_to_clips(self, clip_window_size=None, clip_stride=None, is_padding_zeros=False):
+    if clip_window_size is not None:
+      self.clip_window_size = clip_window_size
+    if clip_stride is not None:
+      self.clip_stride = clip_stride
+    if is_padding_zeros and (not self.is_padding_zeros):
+      self.is_padding_zeros = is_padding_zeros
+
+    self.card["clips.window_size"] = self.clip_window_size
+    self.card["clips.stride"] = self.clip_stride
+    self.card["clips.is_padding_zeros"] = self.is_padding_zeros
+
+    # Create training set clips
+    nClips = []
+    nClipLabels = []
+    for (nClip, nClipLabel) in self.ts_sequence_clips:
+      nClips.append(nClip)
+      nClipLabels.append(nClipLabel)
+    nClips = np.asarray(nClips)
+    nClipLabels = np.asarray(nClipLabels)
+    self.assign_training_set(nClips, nClipLabels)
+
+    # Create validation set clips
+    if self.vs_samples is not None:
+      nClips = []
+      nClipLabels = []
+      for (nClip, nClipLabel) in self.vs_sequence_clips:
+        nClips.append(nClip)
+        nClipLabels.append(nClipLabel)
+      nClips = np.asarray(nClips)
+      nClipLabels = np.asarray(nClipLabels)
+      self.assign_validation_set(nClips, nClipLabels)
+
+    # Create unknown test set clips
+    if self.ut_samples is not None:
+      nClips = []
+      nClipLabels = []
+      for (nClip, nClipLabel) in self.ut_sequence_clips:
+        nClips.append(nClip)
+        nClipLabels.append(nClipLabel)
+      nClips = np.asarray(nClips)
+      nClipLabels = np.asarray(nClipLabels)
+      self.assign_unknown_test_set(nClips, nClipLabels)
+
+    return self
+  # --------------------------------------------------------------------------------------
radnn/evaluation/evaluate_classification.py
CHANGED

@@ -94,10 +94,10 @@ class EvaluateClassification(object):
     print("-" * nRepeat)
   # --------------------------------------------------------------------------------------------------------------
   def print_overall(self):
-    print(f"Weighted Average Recall % :{self.average_recall*100.0:.3f}")
-    print(f"Weighted Average Precision %:{self.average_precision*100.0:.3f}")
     print(f"Accuracy % :{self.accuracy*100.0 :.3f}")
     print(f"Average F1 Score % :{self.average_f1score*100.0:.3f}")
+    print(f"Weighted Average Recall % :{self.average_recall*100.0:.3f}")
+    print(f"Weighted Average Precision %:{self.average_precision*100.0:.3f}")
     if (self.class_count == 2) and (self.auc is not None):
       print(f"Area Under the Curve (AUC):{self.auc:.4f}")
     print()