radnn 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
radnn/data/sample_set.py CHANGED
@@ -29,6 +29,7 @@ from sklearn.utils.class_weight import compute_class_weight
 from .sample_set_kind import SampleSetKindInfo
 
 from radnn.core import RequiredLibs
+
 oReqs = RequiredLibs()
 if oReqs.is_torch_installed:
   # [TODO] Additional coupling to torch (+)->MANOLO
@@ -36,9 +37,10 @@ if oReqs.is_torch_installed:
   import torch
   from torch.utils.data import DataLoader
 
+
 class SampleSet(object):
   LOADER_NUM_WORKERS = 8
-
+
   # --------------------------------------------------------------------------------------------------------------------
   def __init__(self, parent_dataset, subset_kind: str, **kwargs):
     '''
@@ -60,43 +62,43 @@ class SampleSet(object):
     self.info.is_classification = kwargs.get("is_classification", False)
     self.batch_size = kwargs.get("batch_size", None)
     self.transform_augment = kwargs.get("sample_transform_augment", None)
-    self._sample_count = 0
-
-    self.ids : list | np.ndarray | None = kwargs.get('ids', None)
-    self.samples: list | np.ndarray | None = kwargs.get("samples", None)
+    self._sample_count = 0
+
+    self.ids: list | np.ndarray | None = kwargs.get('ids', None)
+    self.samples: list | np.ndarray | None = kwargs.get("samples", None)
     if (self.samples is not None):
       if isinstance(self.samples, list):
         self.samples = np.array(self.samples, np.float32)
       self._sample_count = self.samples.shape[0]
-    self.labels: list | np.ndarray | None = kwargs.get("labels", None)
-
-    self.files = None
-    self._sample_directory: pd.DataFrame | None = kwargs.get("sample_file_records", None)
+    self.labels: list | np.ndarray | None = kwargs.get("labels", None)
+
+    self.files = None
+    self._sample_directory: pd.DataFrame | None = kwargs.get("sample_file_records", None)
     if self._sample_directory is not None:
       self.files = self._sample_directory.iloc[:, 0].to_list()
       self._sample_count = len(self.files)
-      self.labels = self._sample_directory.iloc[:, 1].to_list()
-
-    assert self._sample_count > 0, ERR_SUBSET_MUST_HAVE_SAMPLES
+      self.labels = self._sample_directory.iloc[:, 1].to_list()
+
+    assert self._sample_count > 0, ERR_DS_SUBSET_MUST_HAVE_SAMPLES
     self._step = 1
     self._iter_start_pos = 0
     self._iter_counter = 0
     self._are_samples_in_memory = self.samples is not None
-
+
     self.has_sample_ids = self.ids is not None
-
+
     self.minibatch_count = self._sample_count
     if self.batch_size is not None:
       bMustSuffle = kwargs.get("must_shuffle", self.info.must_shuffle)
       if SampleSet.LOADER_NUM_WORKERS > 0:
        self.loader = DataLoader(self, batch_size=self.batch_size, shuffle=bMustSuffle,
-                                num_workers=SampleSet.LOADER_NUM_WORKERS,
-                                pin_memory=True,persistent_workers=True, prefetch_factor=SampleSet.LOADER_NUM_WORKERS
+                                 num_workers=SampleSet.LOADER_NUM_WORKERS,
+                                 pin_memory=True, persistent_workers=True, prefetch_factor=SampleSet.LOADER_NUM_WORKERS
                                 )
       else:
        self.loader = DataLoader(self, batch_size=self.batch_size, shuffle=bMustSuffle,
-                                num_workers=SampleSet.LOADER_NUM_WORKERS,
-                                pin_memory=True
+                                 num_workers=SampleSet.LOADER_NUM_WORKERS,
+                                 pin_memory=True
                                 )
 
       self.minibatch_count = len(self.loader)
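
With batching enabled, the constructor now hands persistent_workers and prefetch_factor to torch.utils.data.DataLoader alongside pin_memory. A minimal sketch of the same loader settings on a stand-alone dataset (the TensorDataset, shapes, and batch size below are illustrative, not taken from radnn):

import torch
from torch.utils.data import DataLoader, TensorDataset

# Illustrative in-memory dataset; in radnn the SampleSet instance itself is passed to DataLoader.
samples = torch.randn(1024, 16)
labels = torch.randint(0, 3, (1024,))
dataset = TensorDataset(samples, labels)

loader = DataLoader(dataset, batch_size=32, shuffle=True,
                    num_workers=8,             # mirrors SampleSet.LOADER_NUM_WORKERS
                    pin_memory=True,           # page-locked host memory for faster host-to-GPU copies
                    persistent_workers=True,   # keep worker processes alive between epochs
                    prefetch_factor=8)         # batches pre-loaded per worker (torch default is 2)

for batch_samples, batch_labels in loader:
    pass  # training step goes here

In torch, prefetch_factor and persistent_workers are only valid with num_workers > 0, which matches the LOADER_NUM_WORKERS guard in the hunk above.
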
@@ -104,19 +106,23 @@ class SampleSet(object):
      self.loader = DataLoader(self, shuffle=self.info.must_shuffle, num_workers=SampleSet.LOADER_NUM_WORKERS,
                               pin_memory=True, persistent_workers=True
                              )
-
+
    if self.info.is_classification:
      self.info.class_indices = np.sort(np.unique(self.labels))
-      self.info.class_weights = compute_class_weight(class_weight='balanced', classes=self.info.class_indices, y=np.array(self.labels))
+      self.info.class_weights = compute_class_weight(class_weight='balanced', classes=self.info.class_indices,
+                                                     y=np.array(self.labels))
      self.info.class_count = len(self.info.class_indices)
+
  # --------------------------------------------------------------------------------------------------------------------
  @property
  def sample_count(self):
    return self._sample_count
+
  # --------------------------------------------------------------------------------------------------------------------
  @property
  def has_labels(self):
    return self.labels is not None
+
  # --------------------------------------------------------------------------------------------------------------------
  @property
  def data_tuple(self):
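
The rewrapped compute_class_weight call keeps scikit-learn's 'balanced' strategy, which weights each class by n_samples / (n_classes * count_of_class). A small self-contained check (the label array is made up):

import numpy as np
from sklearn.utils.class_weight import compute_class_weight

labels = np.array([0, 0, 0, 0, 1, 1, 2, 2])    # made-up labels
class_indices = np.sort(np.unique(labels))     # array([0, 1, 2])
weights = compute_class_weight(class_weight='balanced',
                               classes=class_indices,
                               y=labels)
# 'balanced': weight of class c = n_samples / (n_classes * count(c))
# here: 8 / (3 * [4, 2, 2])  ->  [0.667, 1.333, 1.333]
print(dict(zip(class_indices.tolist(), np.round(weights, 3))))
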
@@ -137,9 +143,11 @@ class SampleSet(object):
      return (self.ids, self.samples, self.labels)
    else:
      return None
+
  # --------------------------------------------------------------------------------------------------------------------
  def __len__(self):
    return self._sample_count
+
  # --------------------------------------------------------------------------------------------------------------------
  def __getitem__(self, index):
    nID = None
@@ -147,11 +155,10 @@ class SampleSet(object):
      tSample = self.do_load_sample(index)
    else:
      tSample, nID = self.do_load_sample_from_file(index)
-
-
+
    if self.transform_augment is not None:
      tSample = self.transform_augment(tSample)
-
+
    if self.ids is not None:
      nID = self.ids[index]
 
@@ -160,15 +167,16 @@ class SampleSet(object):
    if self.labels is not None:
      # Supervised
      oResult.append(self.do_load_target(index))
-
+
    if nID is not None:
      oResult.append(nID)
 
    return tuple(oResult)
+
  # --------------------------------------------------------------------------------------------------------------------
  def __iter__(self):
    self._iter_counter = 0
-
+
    if self._are_samples_in_memory:
      if self.ids is not None:
        if self.labels is not None:
@@ -185,6 +193,7 @@ class SampleSet(object):
        yield from self._file_generator_for_supervised()
      else:
        yield from self._file_generator_for_unsupervised()
+
  # --------------------------------------------------------------------------------------------------------------------
  def do_load_sample_from_file(self, index):
    '''
@@ -192,6 +201,7 @@ class SampleSet(object):
    :param index: The index of the sample in the sample file list
    '''
    pass
+
  # --------------------------------------------------------------------------------------------------------------------
  def do_load_sample(self, index):
    '''
@@ -200,6 +210,7 @@ class SampleSet(object):
    :return:
    '''
    return torch.tensor(self.samples[index, ...], dtype=torch.float32)
+
  # --------------------------------------------------------------------------------------------------------------------
  def do_load_target(self, index):
    '''
@@ -207,6 +218,7 @@ class SampleSet(object):
    :param index: The index of the sample in the sample subset
    '''
    return torch.tensor(self.labels[index, ...], dtype=torch.long)
+
  # --------------------------------------------------------------------------------------------------------------------
  def _file_generator_for_supervised(self):
    nIndex = self._iter_start_pos
@@ -216,6 +228,7 @@ class SampleSet(object):
        tSample = self.transform_augment(tSample)
      yield (tSample, self.do_load_target(nIndex))
      nIndex += self._step
+
  # --------------------------------------------------------------------------------------------------------------------
  def _file_generator_for_unsupervised(self):
    nIndex = self._iter_start_pos
@@ -225,6 +238,7 @@ class SampleSet(object):
        tSample = self.transform_augment(tSample)
      yield (tSample)
      nIndex += self._step
+
  # --------------------------------------------------------------------------------------------------------------------
  def _numpy_generator_for_supervised(self):
    nIndex = self._iter_start_pos
@@ -234,6 +248,7 @@ class SampleSet(object):
        tSample = self.transform_augment(tSample)
      yield (tSample, self.do_load_target(nIndex))
      nIndex += self._step
+
  # --------------------------------------------------------------------------------------------------------------------
  def _numpy_generator_for_supervised_with_ids(self):
    nIndex = self._iter_start_pos
@@ -243,6 +258,7 @@ class SampleSet(object):
        tSample = self.transform_augment(tSample)
      yield (self.ids[nIndex], tSample, self.do_load_target(nIndex))
      nIndex += self._step
+
  # --------------------------------------------------------------------------------------------------------------------
  def _numpy_generator_for_unsupervised(self):
    nIndex = self._iter_start_pos
@@ -252,6 +268,7 @@ class SampleSet(object):
        tSample = self.transform_augment(tSample)
      yield tSample
      nIndex += self._step
+
  # --------------------------------------------------------------------------------------------------------------------
  def _numpy_generator_for_unsupervised_with_ids(self):
    nIndex = self._iter_start_pos
@@ -261,6 +278,7 @@ class SampleSet(object):
        tSample = self.transform_augment(tSample)
      yield (self.ids[nIndex], tSample)
      nIndex += self._step
+
  # --------------------------------------------------------------------------------------------------------------------
  def print_info(self):
    sDescription = self.info.kind_description
@@ -269,18 +287,19 @@ class SampleSet(object):
    if self.minibatch_count is not None:
      sMinibatches = f" minbatches: {self.minibatch_count}"
    if (self.samples is not None) and isinstance(self.samples, np.ndarray):
-      print(f" |__ {sDescription} set samples: {self.sample_count} shape: {self.samples.shape}{sMinibatches}")
-      print(f" |__ {sDescription} set labels: {self.sample_count} shape: {self.labels.shape}")
+      print(f" |__ {sDescription} samples: {self.sample_count} shape: {self.samples.shape}{sMinibatches}")
    else:
-      print(f" |__ {sDescription} set samples: {self.sample_count} {sMinibatches}")
-
+      print(f" |__ {sDescription} samples: {self.sample_count} {sMinibatches}")
+
    if (self.labels is not None) and isinstance(self.labels, np.ndarray):
-      print(f" |__ Labels: {self.sample_count} shape:{self.labels.shape}")
+      print(f" |__ {sDescription} labels: {self.sample_count} shape:{self.labels.shape}")
    else:
-      print(f" |__ Labels: {self.sample_count}")
+      print(f" |__ {sDescription} labels: {self.sample_count}")
+
  # --------------------------------------------------------------------------------------------------------------------
  def __str__(self):
    return f"{self.info.kind_description} samples:{self.sample_count} minibatches:{self.minibatch_count}"
+
  # --------------------------------------------------------------------------------------------------------------------
  def __repr__(self):
    return self.__str__()
@@ -31,21 +31,24 @@ class SampleSetKind(Enum):
   TRAINING_SET = 0
   VALIDATION_SET = 1
   UNKNOWN_TEST_SET = 2
+
+
+  # ======================================================================================================================
 
 
 # ======================================================================================================================
 class SampleSetKindInfo(dict):
   # --------------------------------------------------------------------------------------------------------------------
-  def __init__(self, kind_str: str|None=None, kind: int | None=None, **kwargs):
+  def __init__(self, kind_str: str | None = None, kind: int | None = None, **kwargs):
     super().__init__(**kwargs)
     self._kind: int | None = kind
     self._kind_str: str | None = None
     if kind_str is not None:
       self._kind_str = kind_str.lower()
-
+
     # Invoke the property getters
-    self["kind"] = self.kind
-    self["kind_str"] = self.kind_description
+    self["kind"] = self.kind
+    self["kind_str"] = self.kind_description
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def kind_description(self):
@@ -58,11 +61,12 @@ class SampleSetKindInfo(dict):
       elif nKind == SampleSetKind.value:
         self._kind_str = "training"
     return self._kind_str
+
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def kind(self):
     if (self._kind is None):
-      nKind = -1 # Unknown
+      nKind = -1  # Unknown
       if self.is_training_set:
         nKind = SampleSetKind.TRAINING_SET.value
       elif self.is_validation_set:
@@ -71,26 +75,32 @@ class SampleSetKindInfo(dict):
         nKind = SampleSetKind.UNKNOWN_TEST_SET.value
       self._kind = nKind
     return self._kind
+
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def must_shuffle(self):
     return self.kind == SampleSetKind.TRAINING_SET.value
+
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def is_training_set(self):
     return (self._kind_str == "training") or (self._kind_str == "train") or (self._kind_str == "ts")
+
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def is_validation_set(self):
     return (self._kind_str == "validation") or (self._kind_str == "val") or (self._kind_str == "vs")
+
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def is_unknown_test_set(self):
     return (self._kind_str == "testing") or (self._kind_str == "test") or (self._kind_str == "ut")
+
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def is_classification(self):
     return self["Task"] == "classification"
+
   # --------------------------------------------------------------------------------------------------------------------
   @is_classification.setter
   def is_classification(self, value):
@@ -99,26 +109,32 @@ class SampleSetKindInfo(dict):
     self["Classes.Count"] = None
     self["Classes.Indices"] = None
     self["Classes.Weights"] = None
+
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def class_count(self):
     return self["Classes.Count"]
+
   # --------------------------------------------------------------------------------------------------------------------
   @class_count.setter
   def class_count(self, value):
     self["Classes.Count"] = value
+
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def class_indices(self):
     return self["Classes.Indices"]
+
   # --------------------------------------------------------------------------------------------------------------------
   @class_indices.setter
   def class_indices(self, value):
     self["Classes.Indices"] = value
+
   # --------------------------------------------------------------------------------------------------------------------
   @property
   def class_weights(self):
     return self["Classes.Weights"]
+
   # --------------------------------------------------------------------------------------------------------------------
   @class_weights.setter
   def class_weights(self, value):
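
Taken together, the properties above map several kind_str aliases onto the three SampleSetKind values and shuffle only the training set. A sketch of the intended mapping, assuming SampleSetKindInfo lives in the same module as SampleSetKind (as the relative imports elsewhere in this diff suggest):

from radnn.data.sample_set_kind import SampleSetKind, SampleSetKindInfo

# "training" / "train" / "ts"  -> TRAINING_SET
# "validation" / "val" / "vs"  -> VALIDATION_SET
# "testing" / "test" / "ut"    -> UNKNOWN_TEST_SET
info = SampleSetKindInfo(kind_str="val")
assert info.kind == SampleSetKind.VALIDATION_SET.value
assert not info.must_shuffle        # only the training set is shuffled

info = SampleSetKindInfo(kind_str="train")
assert info.must_shuffle
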
@@ -1,13 +1,29 @@
+import numpy as np
+from radnn.data.sample_set_kind import SampleSetKind
+
 class SampleSet(object):
   # --------------------------------------------------------------------------------------------------------------------
-  def __init__(self, parent_dataset, samples=None, labels=None):
+  def __init__(self, parent_dataset, samples=None, labels=None, ids=None, kind: SampleSetKind | str = None):
     self.parent_dataset = parent_dataset
     self.samples = samples
     self.labels = labels
+    self.ids = ids
+    if self.ids is None:
+      self.ids = np.arange(len(self.samples)) + 1
+    self.kind: SampleSetKind = kind
+
     self.loader = None
     self._sample_count = None
     self._minibatch_count = None
   # --------------------------------------------------------------------------------------------------------------------
+  def assign(self, samples, labels=None, ids=None):
+    self.samples = samples
+    self.labels = labels
+    self.ids = ids
+    if self.ids is None:
+      self.ids = np.arange(len(self.samples)) + 1
+    return self
+  # --------------------------------------------------------------------------------------------------------------------
   @property
   def sample_count(self):
     if self._sample_count is None:
@@ -26,7 +42,49 @@ class SampleSet(object):
   def __getitem__(self, index):
     return (self.samples[index], self.labels[index])
   # --------------------------------------------------------------------------------------------------------------------
+  def __str__(self):
+    sDescr = f"{self.kind:<17} samples: {self.sample_count:7d} mini-batches: {self.minibatch_count}"
+    return sDescr
+  # --------------------------------------------------------------------------------------------------------------------
+  def __repr__(self):
+    return self.__str__()
 
+  # --------------------------------------------------------------------------------------------------------------------
+  def _iter_unsupervised(self):
+    for nIndex, nSample in enumerate(self.samples):
+      nLabel = self.labels[nIndex]
+      nId = self.ids[nIndex]
+      yield nSample, int(nId)
+  # --------------------------------------------------------------------------------------------------------------------
+  def _iter_supervised(self):
+    for nIndex, nSample in enumerate(self.samples):
+      nLabel = self.labels[nIndex]
+      nId = self.ids[nIndex]
+      yield nSample, nLabel, int(nId)
+  # --------------------------------------------------------------------------------------------------------------------
+  def __iter__(self):
+    if self.labels is None:
+      return self._iter_unsupervised()
+    else:
+      return self._iter_supervised()
+  # --------------------------------------------------------------------------------------------------------------------
+  def print_info(self):
+    sDescription = self.kind.name.lower().replace("_", " ")
+    sDescription = sDescription[0].upper() + sDescription[1:]
+    sMinibatches = ""
+    if self.minibatch_count is not None:
+      sMinibatches = f" minbatches: {self.minibatch_count}"
+    if (self.samples is not None) and isinstance(self.samples, np.ndarray):
+      print(f" |__ {sDescription} samples: {self.sample_count} shape: {self.samples.shape}{sMinibatches}")
+    else:
+      print(f" |__ {sDescription} samples: {self.sample_count} {sMinibatches}")
+
+    if (self.labels is not None) and isinstance(self.labels, np.ndarray):
+      print(f" |__ {sDescription} labels: {self.sample_count} shape:{self.labels.shape}")
+    else:
+      print(f" |__ {sDescription} labels: {self.sample_count}")
+  # --------------------------------------------------------------------------------------------------------------------
+  # ======================================================================================================================
 
 
 
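
This second, lighter SampleSet variant now auto-generates 1-based ids and yields (sample, label, id) triplets when labels are present. A short sketch, assuming the class shown in the hunk above is in scope (its module path is not shown in this diff) and using made-up arrays:

import numpy as np
from radnn.data.sample_set_kind import SampleSetKind

samples = np.random.rand(4, 2).astype(np.float32)   # made-up data
labels = np.array([0, 1, 0, 1])

subset = SampleSet(parent_dataset=None, samples=samples, labels=labels,
                   kind=SampleSetKind.TRAINING_SET)
print(subset.ids)                         # [1 2 3 4]: 1-based ids generated automatically
for sample, label, sample_id in subset:   # with labels present, __iter__ yields (sample, label, id)
  print(sample_id, label, sample.shape)
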
@@ -68,10 +68,10 @@ class SequenceDataSet(DataSetBase):
     self.clip_stride: int|None = None
     self.is_padding_zeros: bool|None = None
   # --------------------------------------------------------------------------------------------------------------------
-  def do_read_hyperparams(self):
-    self.clip_window_size = self.hparams.get("Data.Clip.WindowSize", None)
-    self.clip_stride = self.hparams.get("Data.Clip.Stride", None)
-    self.is_padding_zeros = self.hparams.get("Data.Clip.IsPaddingZeroes", None)
+  def read_hyperparams(self):
+    self.clip_window_size = self.hprm.get("Data.Clip.WindowSize", None)
+    self.clip_stride = self.hprm.get("Data.Clip.Stride", None)
+    self.is_padding_zeros = self.hprm.get("Data.Clip.IsPaddingZeroes", None)
   # --------------------------------------------------------------------------------------------------------------------
   def generate_clips(self, subset: SampleSet):
     return generate_sequence_clips(self, subset.samples, subset.labels,
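
The hook is renamed to read_hyperparams and now reads from self.hprm instead of self.hparams. A minimal sketch of the dotted-key lookups, using a plain dict as a stand-in for the radnn hyperparameter store (the real object may expose more than .get()):

# Plain dict standing in for self.hprm; the keys are the ones used in the hunk above.
hprm = {
  "Data.Clip.WindowSize": 128,
  "Data.Clip.Stride": 64,
  "Data.Clip.IsPaddingZeroes": True,
}

clip_window_size = hprm.get("Data.Clip.WindowSize", None)
clip_stride = hprm.get("Data.Clip.Stride", None)
is_padding_zeros = hprm.get("Data.Clip.IsPaddingZeroes", None)
print(clip_window_size, clip_stride, is_padding_zeros)   # 128 64 True
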
@@ -0,0 +1,24 @@
+from enum import Enum
+
+class MLTask(Enum):
+  BINARY_CLASSIFICATION = 0
+  MULTICLASS_CLASSIFICATION = 1
+  REGRESSION = 2
+  MULTIVARIATE_REGRESSION = 3
+  CLUSTERING = 4
+  INPUT_RECONSTRUCTION = 5
+  RANKING = 6
+
+
+def get_task_from_hyperparams(hyperparams: dict) -> MLTask:
+  sTask = None
+  if "Experiment.Task" in hyperparams:
+    sTask = hyperparams["Experiment.Task"]
+
+  oTask = None
+  if sTask is not None:
+    sTask = sTask.upper().replace(" ", "_")
+    if sTask in MLTask.__members__:
+      oTask = MLTask[sTask]
+
+  return oTask
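
The new helper normalizes the "Experiment.Task" string (uppercase, spaces to underscores) before looking it up in MLTask.__members__, and returns None when the key is missing or unrecognized. A quick usage sketch, assuming MLTask and get_task_from_hyperparams are imported from the new module (its path is not shown in this diff):

task = get_task_from_hyperparams({"Experiment.Task": "multiclass classification"})
print(task)                                                       # MLTask.MULTICLASS_CLASSIFICATION

print(get_task_from_hyperparams({"Experiment.Task": "ranking"}))  # MLTask.RANKING
print(get_task_from_hyperparams({}))                              # None: key missing or unrecognized
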
@@ -162,7 +162,7 @@ class MLModelFreezer(object):
   def convnext_frozen(self):
     """
     torchvision.convnext_tiny has:
-      model.features: Sequential of 8 modules:
+      model.features: Sequential of 8 nnmodules:
        0 stem
        1 stage1
        2 downsample1
radnn/plots/__init__.py CHANGED
@@ -5,5 +5,6 @@ from .plot_voronoi_2d import PlotVoronoi2D
 from .plot_multi_scatter import MultiScatterPlot
 from .plot_auto_multi_image import AutoMultiImagePlot
 from .plot_function import PlotFunction
-from .plot_histogram_of_classes import PlotHistogramOfClasses, CPlot
-from .plot_visualize_dataset2d import PlotVisualizeDataset2D
+from .plot_histogram_of_classes import PlotHistogramOfClasses
+from .plot_visualize_dataset2d import PlotVisualizeDataset2D
+from .plot_legacy import CPlot, PlotDataset
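
CPlot moves from plot_histogram_of_classes to a new plot_legacy module, and a PlotDataset name is re-exported alongside it. A sketch of what this means for imports (the radnn.plots paths follow the relative imports shown above):

# Package-level imports keep working, because __init__.py re-exports the names.
from radnn.plots import PlotHistogramOfClasses, CPlot, PlotDataset

# Code that imported CPlot from plot_histogram_of_classes directly must now target plot_legacy.
from radnn.plots.plot_legacy import CPlot
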
@@ -28,101 +28,23 @@ import numpy as np
 import matplotlib.pyplot as plt # use the subpackage (a.k.a. namespace) with the alias "plt"
 from matplotlib import colors
 
-
-# ====================================================================================================
-class CPlot(object): # class CPlot: object
-  # --------------------------------------------------------------------------------------
-  # Constructor
-  def __init__(self, p_sTitle, p_oSamples, p_oLabels
-               , p_sLabelDescriptions=["orange tree", "olive tree"]
-               , p_sColors=["darkorange", "darkseagreen"]
-               # https://matplotlib.org/3.1.0/gallery/color/named_colors.html
-               , p_sXLabel="Feature 1"
-               , p_sYLabel="Feature 2"
-               ):
-    # ................................................................
-    # // Fields \\
-    self.Title = p_sTitle
-    self.Samples = p_oSamples
-    self.Labels = p_oLabels
-    self.LabelDescriptions = p_sLabelDescriptions
-    self.Colors = p_sColors
-    self.XLabel = p_sXLabel
-    self.YLabel = p_sYLabel
-    # ................................................................
-
-  # --------------------------------------------------------------------------------------
-  def Show(self, p_bIsMinMaxScaled=False, p_nLineSlope=None, p_nLineIntercept=None, p_nLimitsX=[-4, 4],
-           p_nLimitsY=[-4, 4]):
-
-    # Two dimensional dataset for the scatter plot
-    nXValues = self.Samples[:, 0]
-    nYValues = self.Samples[:, 1]
-    nLabels = self.Labels
-
-    oColorMap = colors.ListedColormap(self.Colors)
-
-    fig, ax = plt.subplots(figsize=(8, 8))
-    plt.scatter(nXValues, nYValues, c=nLabels, cmap=oColorMap)
-
-    plt.title(self.Title)
-    cb = plt.colorbar()
-    nLoc = np.arange(0, max(nLabels), max(nLabels) / float(len(self.Colors)))
-    cb.set_ticks(nLoc)
-    cb.set_ticklabels(self.LabelDescriptions)
-
-    if (p_nLineSlope is not None):
-      x1 = np.min(nXValues)
-      y1 = p_nLineSlope * x1 + p_nLineIntercept;
-      x2 = np.max(nXValues)
-      y2 = p_nLineSlope * x2 + p_nLineIntercept;
-      oPlot1 = ax.plot([x1, x2], [y1, y2], 'r--', label="Decision line")
-      oLegend = plt.legend(loc="upper left", shadow=True, fontsize='x-large')
-      oLegend.get_frame().set_facecolor("lightyellow")
-
-    if p_bIsMinMaxScaled:
-      ax.set_xlim((-0.05, 1.05))
-      ax.set_ylim((-0.05, 1.05))
-    else:
-      ax.set_xlim(p_nLimitsX[0], p_nLimitsX[1])
-      ax.set_ylim(p_nLimitsY[0], p_nLimitsY[1])
-
-    ax.set_xlabel(self.XLabel)
-    ax.set_ylabel(self.YLabel)
-
-    # plt.scatter(oDataset.Samples[:,0], oDataset.Samples[:,1])
-    # , t, 'r--', t, t**2, 'bs', t, t**3, 'g^')
-
-    plt.show()
-  # --------------------------------------------------------------------------------------
-
-
-# ====================================================================================================
-
-
-
-
-
-
-
-
 # =========================================================================================================================
 class PlotHistogramOfClasses(object): # class CPlot: object
   # --------------------------------------------------------------------------------------
-  def __init__(self, p_nData, p_nClasses, p_bIsProbabilities=False):
-    self.Data = p_nData
-    self.Classes = p_nClasses
-    self.IsProbabilities = p_bIsProbabilities
+  def __init__(self, data, classes, is_probabilities=False):
+    self.data = data
+    self.classes = classes
+    self.is_probabilities = is_probabilities
 
   # --------------------------------------------------------------------------------------
   def prepare(self):
 
     fig, ax = plt.subplots(figsize=(7, 7))
 
-    ax.hist(self.Data, density=self.IsProbabilities, bins=self.Classes, ec="k")
+    ax.hist(self.data, density=self.is_probabilities, bins=self.classes, ec="k")
     ax.locator_params(axis='x', integer=True)
 
-    if self.IsProbabilities:
+    if self.is_probabilities:
      plt.ylabel('Probabilities')
    else:
      plt.ylabel('Counts')
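
The constructor parameters drop the Hungarian-style prefixes (p_nData -> data, p_nClasses -> classes, p_bIsProbabilities -> is_probabilities). A usage sketch with made-up labels:

import numpy as np
from radnn.plots import PlotHistogramOfClasses

labels = np.random.randint(0, 4, size=200)    # made-up class labels
plot = PlotHistogramOfClasses(data=labels, classes=4, is_probabilities=True)
plot.prepare()                                # builds the histogram figure shown above
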