eqcctpro 0.6.3__py3-none-any.whl → 0.6.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of eqcctpro might be problematic; see the registry page for more details.

@@ -0,0 +1,407 @@
+ import warnings
+ import absl.logging
+ import numpy as np
+ import tensorflow as tf
+ from io import StringIO
+ from contextlib import redirect_stdout
+ from silence_tensorflow import silence_tensorflow
+
+ # Silence TensorFlow logging
+ absl.logging.set_verbosity(absl.logging.ERROR)
+ tf.get_logger().setLevel('ERROR')
+ tf.autograph.set_verbosity(0)
+ warnings.filterwarnings("ignore")
+ silence_tensorflow()
+
+ w1 = 6000   # input trace length (samples)
+ w2 = 3      # input channels (components)
+ drop_rate = 0.2
+ stochastic_depth_rate = 0.1
+
+ positional_emb = False
+ conv_layers = 4
+ num_classes = 1
+ input_shape = (w1, w2)  # (6000, 3)
+ image_size = 6000  # input length; the 1-D trace is treated as a 6000x1 image
+ patch_size = 40  # size of the patches to be extracted from the input
+ num_patches = image_size // patch_size  # 150
+ projection_dim = 40
+
+ num_heads = 4
+ transformer_units = [
+     projection_dim,
+     projection_dim,
+ ]  # sizes of the transformer MLP layers
+ transformer_layers = 4
+
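+ # With these settings the model ingests a (6000, 3) three-component trace,
+ # splits the time axis into 6000 // 40 = 150 non-overlapping patches, and
+ # projects each patch to a 40-dimensional token for a 4-layer, 4-head
+ # transformer. Note that positional_emb, conv_layers, num_classes, and
+ # drop_rate are defined here but are not referenced elsewhere in this module.
+
+ # The metrics below are batch-level implementations of precision, recall,
+ # and F1 (computed per batch, so approximate); epsilon() guards against
+ # division by zero.
+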
+ def recall(y_true, y_pred):
+     true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
+     possible_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true, 0, 1)))
+     return true_positives / (possible_positives + tf.keras.backend.epsilon())
+
+
+ def precision(y_true, y_pred):
+     true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
+     predicted_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_pred, 0, 1)))
+     return true_positives / (predicted_positives + tf.keras.backend.epsilon())
+
+
+ def f1(y_true, y_pred):
+     precisionx = precision(y_true, y_pred)
+     recallx = recall(y_true, y_pred)
+     return 2 * ((precisionx * recallx) / (precisionx + recallx + tf.keras.backend.epsilon()))
+
+
+ def wbceEdit(y_true, y_pred):
+     # Combined SSIM + mean-squared-error loss (not used elsewhere in this module).
+     ms = tf.keras.backend.mean(tf.keras.backend.square(y_true - y_pred))
+     ssim = 1 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, 1.0))
+     return ssim + ms
+
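+ # Patches slices the (batch, 6000, 1, 40) feature map produced by the conv
+ # front-end into non-overlapping windows of `patch_size` samples along the
+ # time axis. With patch_size=40, call() returns (batch, 150, 1600), since
+ # each patch flattens 40 time steps x 40 channels.
+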
+ class Patches(tf.keras.layers.Layer):
+     def __init__(self, patch_size, **kwargs):
+         super().__init__(**kwargs)
+         self.patch_size = patch_size
+
+     def get_config(self):
+         config = super().get_config().copy()
+         config.update({'patch_size': self.patch_size})
+         return config
+
+     def call(self, images):
+         batch_size = tf.shape(images)[0]
+         patches = tf.image.extract_patches(
+             images=images,
+             sizes=[1, self.patch_size, 1, 1],
+             strides=[1, self.patch_size, 1, 1],
+             rates=[1, 1, 1, 1],
+             padding="VALID",
+         )
+         patch_dims = patches.shape[-1]
+         patches = tf.reshape(patches, [batch_size, -1, patch_dims])
+         return patches
+
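+ # PatchEncoder maps each flattened patch to projection_dim features and adds
+ # a learned positional embedding. The embedding is always applied here,
+ # regardless of the positional_emb flag defined above.
+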
+ class PatchEncoder(tf.keras.layers.Layer):
+     def __init__(self, num_patches, projection_dim, **kwargs):
+         super().__init__(**kwargs)
+         self.num_patches = num_patches
+         self.projection_dim = projection_dim
+         self.projection = tf.keras.layers.Dense(units=projection_dim)
+         self.position_embedding = tf.keras.layers.Embedding(
+             input_dim=num_patches, output_dim=projection_dim
+         )
+
+     def get_config(self):
+         config = super().get_config().copy()
+         config.update({
+             'num_patches': self.num_patches,
+             'projection_dim': self.projection_dim,
+         })
+         return config
+
+     def call(self, patch):
+         positions = tf.range(start=0, limit=self.num_patches, delta=1)
+         encoded = self.projection(patch) + self.position_embedding(positions)
+         return encoded
+
+
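+ # Stochastic depth randomly drops an entire residual branch per sample during
+ # training and rescales the survivors by 1/keep_prob. The drop rates are
+ # scheduled linearly across the transformer blocks:
+ # np.linspace(0, 0.1, 4) -> [0.0, 0.033, 0.067, 0.1].
+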
+ # Referred from: github.com:rwightman/pytorch-image-models.
+ class StochasticDepth(tf.keras.layers.Layer):
+     def __init__(self, drop_prop, **kwargs):
+         super().__init__(**kwargs)
+         self.drop_prob = drop_prop
+
+     def call(self, x, training=None):
+         if training:
+             keep_prob = 1 - self.drop_prob
+             # One Bernoulli draw per sample, broadcast over all other axes
+             shape = (tf.shape(x)[0],) + (1,) * (x.shape.rank - 1)
+             random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
+             random_tensor = tf.floor(random_tensor)
+             return (x / keep_prob) * random_tensor
+         return x
+
+
+ class PreLoadGeneratorTest(tf.keras.utils.Sequence):
+     def __init__(self, list_IDs, inp_data, batch_size=32, norm_mode='std',
+                  dim=None, n_channels=None, dtype=np.float32, **kwargs):
+         self.batch_size = int(batch_size)
+         self.list_IDs = list_IDs
+         self.inp_data = inp_data
+         self.norm_mode = norm_mode
+         self.dtype = dtype
+
+         # Infer input shape if not provided
+         sample = np.array(next(iter(self.inp_data.values())))
+         if dim is None or n_channels is None:
+             if sample.ndim == 2:
+                 # Expect (T, C) like (6000, 3)
+                 self.dim = (int(sample.shape[0]),)
+                 self.n_channels = int(sample.shape[1])
+             elif sample.ndim == 1:
+                 # Fall back to (T, 1)
+                 self.dim = (int(sample.shape[0]),)
+                 self.n_channels = 1
+             else:
+                 raise ValueError(f"Unsupported sample shape: {sample.shape}")
+         else:
+             self.dim = tuple(dim)
+             self.n_channels = int(n_channels)
+
+         self.on_epoch_end()
+
+     def __len__(self):
+         return int(np.ceil(len(self.list_IDs) / self.batch_size))
+
+     def __getitem__(self, index):
+         start_idx = index * self.batch_size
+         end_idx = min((index + 1) * self.batch_size, len(self.list_IDs))
+         indexes = self.indexes[start_idx:end_idx]
+         list_IDs_temp = [self.list_IDs[k] for k in indexes]
+
+         X = self.__data_generation(list_IDs_temp)
+
+         # Keras tolerates a short last batch; X is already sized to the true count.
+         return {'input': X.astype(self.dtype, copy=False)}
+
+     def on_epoch_end(self):
+         self.indexes = np.arange(len(self.list_IDs))
+
+     def __data_generation(self, list_IDs_temp):
+         batch_count = len(list_IDs_temp)
+         X = np.empty((batch_count, self.dim[0], self.n_channels), dtype=self.dtype)
+
+         for i, ID in enumerate(list_IDs_temp):
+             # Use the correct storage and make a writable float32 copy
+             data = np.array(self.inp_data[ID], dtype=self.dtype, copy=True)
+
+             data = self._normalize(data, self.norm_mode)
+             # Ensure (T, C) layout
+             if data.ndim == 1:
+                 data = data[:, None]
+             if data.shape != (self.dim[0], self.n_channels):
+                 raise ValueError(f"Sample {ID} has shape {data.shape}, expected {(self.dim[0], self.n_channels)}")
+             X[i] = data
+
+         return X
+
+     def _normalize(self, data, mode='max'):
+         # Out-of-place ops to be safe even if upstream hands us a read-only view
+         data = data - np.mean(data, axis=0, keepdims=True)
+         if mode == 'max':
+             max_data = np.max(data, axis=0, keepdims=True)
+             max_data[max_data == 0] = 1
+             data = data / max_data
+         elif mode == 'std':
+             std_data = np.std(data, axis=0, keepdims=True)
+             std_data[std_data == 0] = 1
+             data = data / std_data
+         return data
+
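+ # Usage sketch (illustrative; `traces` is an in-memory dict mapping trace IDs
+ # to (6000, 3) arrays, and `model` comes from load_eqcct_model below):
+ #
+ #   gen = PreLoadGeneratorTest(list(traces), traces, batch_size=32, norm_mode='std')
+ #   probs_P, probs_S = model.predict(gen)
+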
+ def load_eqcct_model(input_modelP, input_modelS, log_file="results/logs/model.log"):
+     # log_file is accepted for API compatibility but is not written to here.
+
+     # Model CCT
+     inputs = tf.keras.layers.Input(shape=input_shape, name='input')
+
+     featuresP = create_cct_modelP(inputs)
+     featuresP = tf.keras.layers.Reshape((6000, 1))(featuresP)
+
+     featuresS = create_cct_modelS(inputs)
+     featuresS = tf.keras.layers.Reshape((6000, 1))(featuresS)
+
+     logitp = tf.keras.layers.Conv1D(1, 15, strides=1, padding='same', activation='sigmoid', kernel_initializer='he_normal', name='picker_P')(featuresP)
+     logits = tf.keras.layers.Conv1D(1, 15, strides=1, padding='same', activation='sigmoid', kernel_initializer='he_normal', name='picker_S')(featuresS)
+
+     modelP = tf.keras.models.Model(inputs=[inputs], outputs=[logitp])
+     modelS = tf.keras.models.Model(inputs=[inputs], outputs=[logits])
+
+     model = tf.keras.models.Model(inputs=[inputs], outputs=[logitp, logits])
+
+     # Capture the summary so it is not printed to stdout
+     summary_output = StringIO()
+     with redirect_stdout(summary_output):
+         model.summary()
+
+     optimizer = tf.keras.optimizers.Adam()
+     model.compile(optimizer=optimizer,
+                   loss=['binary_crossentropy', 'binary_crossentropy'],
+                   metrics=['acc', f1, precision, recall])
+
+     # modelP/modelS share layers with the combined model, so loading weights
+     # through these single-output views also populates `model`.
+     modelP.load_weights(input_modelP)
+     modelS.load_weights(input_modelS)
+
+     return model
+
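+ # Usage sketch (file names are illustrative):
+ #
+ #   model = load_eqcct_model('modelP_weights.h5', 'modelS_weights.h5')
+ #
+ # The returned model maps an 'input' tensor of shape (6000, 3) to two
+ # per-sample probability traces, picker_P and picker_S.
+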
+ def convF1(inpt, D1, fil_ord, Dr):
+     # Residual conv block: two same-padding Conv1D/BN/GELU stages with a skip
+     # connection back to the input, then a Conv1D out to D1 filters and dropout.
+     filters = int(inpt.shape[-1])
+
+     pre = tf.keras.layers.Conv1D(filters, fil_ord, strides=1, padding='same', kernel_initializer='he_normal')(inpt)
+     pre = tf.keras.layers.BatchNormalization()(pre)
+     pre = tf.keras.layers.Activation(tf.nn.gelu)(pre)
+
+     inf = tf.keras.layers.Conv1D(filters, fil_ord, strides=1, padding='same', kernel_initializer='he_normal')(pre)
+     inf = tf.keras.layers.BatchNormalization()(inf)
+     inf = tf.keras.layers.Activation(tf.nn.gelu)(inf)
+     inf = tf.keras.layers.Add()([inf, inpt])
+
+     inf1 = tf.keras.layers.Conv1D(D1, fil_ord, strides=1, padding='same', kernel_initializer='he_normal')(inf)
+     inf1 = tf.keras.layers.BatchNormalization()(inf1)
+     inf1 = tf.keras.layers.Activation(tf.nn.gelu)(inf1)
+     encode = tf.keras.layers.Dropout(Dr)(inf1)
+
+     return encode
+
+
+ def mlp(x, hidden_units, dropout_rate):
+     for units in hidden_units:
+         x = tf.keras.layers.Dense(units, activation=tf.nn.gelu)(x)
+         x = tf.keras.layers.Dropout(dropout_rate)(x)
+     return x
+
+
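+ # create_cct_modelP/S: three residual conv blocks lift the (6000, 3) input to
+ # 40 channels, the result is reshaped to a (6000, 1, 40) "image", patched and
+ # encoded into 150 tokens, and passed through transformer_layers transformer
+ # blocks with stochastic depth. The final (batch, 150, 40) sequence is later
+ # reshaped to (6000, 1) in load_eqcct_model, i.e. one feature per sample.
+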
+ def create_cct_modelP(inputs):
+     # Convolutional front-end: 3 -> 10 -> 20 -> 40 channels.
+     inputs1 = convF1(inputs, 10, 11, 0.1)
+     inputs1 = convF1(inputs1, 20, 11, 0.1)
+     inputs1 = convF1(inputs1, 40, 11, 0.1)
+
+     inputreshaped = tf.keras.layers.Reshape((6000, 1, 40))(inputs1)
+     # Create patches.
+     patches = Patches(patch_size)(inputreshaped)
+     # Encode patches.
+     encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
+
+     # Calculate stochastic depth probabilities.
+     dpr = list(np.linspace(0, stochastic_depth_rate, transformer_layers))
+
+     # Create multiple layers of the Transformer block.
+     for i in range(transformer_layers):
+         # Layer normalization 1.
+         x1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
+
+         # Create a multi-head attention layer.
+         attention_output = tf.keras.layers.MultiHeadAttention(
+             num_heads=num_heads, key_dim=projection_dim, dropout=0.1
+         )(x1, x1)
+
+         # Skip connection 1.
+         attention_output = StochasticDepth(dpr[i])(attention_output)
+         x2 = tf.keras.layers.Add()([attention_output, encoded_patches])
+
+         # Layer normalization 2.
+         x3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)(x2)
+
+         # MLP.
+         x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
+
+         # Skip connection 2.
+         x3 = StochasticDepth(dpr[i])(x3)
+         encoded_patches = tf.keras.layers.Add()([x3, x2])
+
+     # Final layer normalization; the original CCT sequence-pooling head is
+     # omitted and the full token sequence is returned.
+     representation = tf.keras.layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
+     return representation
+
+
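+ # create_cct_modelS mirrors create_cct_modelP, but additionally passes the
+ # token sequence and the attention output through convF1 mixing blocks inside
+ # every transformer layer.
+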
+ def create_cct_modelS(inputs):
+     # Convolutional front-end: 3 -> 10 -> 20 -> 40 channels.
+     inputs1 = convF1(inputs, 10, 11, 0.1)
+     inputs1 = convF1(inputs1, 20, 11, 0.1)
+     inputs1 = convF1(inputs1, 40, 11, 0.1)
+
+     inputreshaped = tf.keras.layers.Reshape((6000, 1, 40))(inputs1)
+     # Create patches.
+     patches = Patches(patch_size)(inputreshaped)
+     # Encode patches.
+     encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
+
+     # Calculate stochastic depth probabilities.
+     dpr = list(np.linspace(0, stochastic_depth_rate, transformer_layers))
+
+     # Create multiple layers of the Transformer block.
+     for i in range(transformer_layers):
+         # Extra token mixing (the S-branch difference from the P branch).
+         encoded_patches = convF1(encoded_patches, 40, 11, 0.1)
+         # Layer normalization 1.
+         x1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
+
+         # Create a multi-head attention layer.
+         attention_output = tf.keras.layers.MultiHeadAttention(
+             num_heads=num_heads, key_dim=projection_dim, dropout=0.1
+         )(x1, x1)
+         attention_output = convF1(attention_output, 40, 11, 0.1)
+
+         # Skip connection 1.
+         attention_output = StochasticDepth(dpr[i])(attention_output)
+         x2 = tf.keras.layers.Add()([attention_output, encoded_patches])
+
+         # Layer normalization 2.
+         x3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)(x2)
+
+         # MLP.
+         x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
+
+         # Skip connection 2.
+         x3 = StochasticDepth(dpr[i])(x3)
+         encoded_patches = tf.keras.layers.Add()([x3, x2])
+
+     # Final layer normalization; the original CCT sequence-pooling head is
+     # omitted and the full token sequence is returned.
+     representation = tf.keras.layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
+     return representation