noshot-12.0.0-py3-none-any.whl → noshot-13.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.1 DNN (Pytorch).ipynb +164 -0
  2. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.2 DNN (Tensorflow).ipynb +94 -0
  3. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.3 DNN (Image Classification).ipynb +134 -0
  4. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.1 DNN vs CNN.ipynb +127 -0
  5. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.2 DNN vs CNN.ipynb +123 -0
  6. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/4. FCNN (Image Segmentation).ipynb +108 -0
  7. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Lab Excercise (Training DNN).ipynb +646 -0
  8. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Load-Images.ipynb +553 -0
  9. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex1.ipynb +216 -0
  10. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex2.ipynb +195 -0
  11. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex3.ipynb +427 -0
  12. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex4.ipynb +186 -0
  13. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/DNN Ex No 1.ipynb +398 -0
  14. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Ex No 1 Build in dataset.ipynb +171 -0
  15. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Exp1-Short-DL_ANN_ImageClassification.ipynb +401 -0
  16. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/OR GATE .ipynb +8511 -0
  17. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp02/Exp2-Short-DL_CNN_ImageClassification.ipynb +737 -0
  18. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp03/DL-Ex3-RNN.ipynb +591 -0
  19. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp04/Ex no 4.ipynb +551 -0
  20. {noshot-12.0.0.dist-info → noshot-13.0.0.dist-info}/METADATA +1 -1
  21. noshot-13.0.0.dist-info/RECORD +32 -0
  22. noshot-12.0.0.dist-info/RECORD +0 -13
  23. /noshot/data/DLE FSD BDA/DLE/{1. DNN (Image Classification).ipynb → DLE 1 (Json)/1. DNN (Image Classification).ipynb} +0 -0
  24. /noshot/data/DLE FSD BDA/DLE/{2. DNN vs CNN.ipynb → DLE 1 (Json)/2. DNN vs CNN.ipynb} +0 -0
  25. /noshot/data/DLE FSD BDA/DLE/{3. CNN (Object Detecrion).ipynb → DLE 1 (Json)/3. CNN (Object Detecrion).ipynb} +0 -0
  26. /noshot/data/DLE FSD BDA/DLE/{4. FCN (Image Segmentaion).ipynb → DLE 1 (Json)/4. FCN (Image Segmentaion).ipynb} +0 -0
  27. {noshot-12.0.0.dist-info → noshot-13.0.0.dist-info}/WHEEL +0 -0
  28. {noshot-12.0.0.dist-info → noshot-13.0.0.dist-info}/licenses/LICENSE.txt +0 -0
  29. {noshot-12.0.0.dist-info → noshot-13.0.0.dist-info}/top_level.txt +0 -0
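
The new notebooks ship inside the wheel as plain package data (see the RECORD listing at the end of this diff). A minimal sketch for reading one of them after installation; the path comes from RECORD, but the snippet assumes a standard pip install rather than any documented noshot API:

```python
import json
from importlib.resources import files

# Bundled notebooks live under noshot/data/ per the wheel's RECORD.
nb = files("noshot") / "data" / "DLE FSD BDA" / "DLE" / "DLE 2 (tim stan s)" / "4. FCNN (Image Segmentation).ipynb"
cells = json.loads(nb.read_text(encoding="utf-8"))["cells"]
print(len(cells), "cells")
```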
@@ -0,0 +1,551 @@
+ {
+  "cells": [
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "# Develop an image segmentation model using a fully convolutional network\n",
+     "\n",
+     "**Dataset**: <https://www.kaggle.com/competitions/data-science-bowl-2018/data>"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import tensorflow as tf\n",
+     "import os\n",
+     "import random\n",
+     "import numpy as np\n",
+     "\n",
+     "from tqdm import tqdm\n",
+     "\n",
+     "from skimage.io import imread, imshow\n",
+     "from skimage.transform import resize\n",
+     "import matplotlib.pyplot as plt\n",
+     "from sklearn.model_selection import train_test_split\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "seed = 42\n",
+     "np.random.seed(seed)  # seed NumPy's global RNG\n",
+     "\n",
+     "IMG_WIDTH = 128\n",
+     "IMG_HEIGHT = 128\n",
+     "IMG_CHANNELS = 3\n",
+     "\n",
+     "TRAIN_PATH = \"stage1_train/\"\n",
+     "TEST_PATH = \"stage1_test/\"\n",
+     "\n",
+     "train_ids = next(os.walk(TRAIN_PATH))[1]\n",
+     "test_ids = next(os.walk(TEST_PATH))[1]\n",
+     "\n",
+     "X = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\n",
+     "y = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "print('Resizing training images and masks')\n",
+     "for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\n",
+     "    path = TRAIN_PATH + id_\n",
+     "    print(path)\n",
+     "    img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\n",
+     "    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n",
+     "    X[n] = img  # fill the preallocated X with the resized image\n",
+     "    mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)\n",
+     "    for mask_file in next(os.walk(path + '/masks/'))[2]:\n",
+     "        mask_ = imread(path + '/masks/' + mask_file)\n",
+     "        mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',\n",
+     "                                      preserve_range=True), axis=-1)\n",
+     "        mask = np.maximum(mask, mask_)  # merge per-nucleus masks into one binary mask\n",
+     "\n",
+     "    y[n] = mask"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# test images\n",
+     "test_images = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\n",
+     "sizes_test = []\n",
+     "print('Resizing test images')\n",
+     "for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):\n",
+     "    path = TEST_PATH + id_\n",
+     "    img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\n",
+     "    sizes_test.append([img.shape[0], img.shape[1]])\n",
+     "    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n",
+     "    test_images[n] = img\n",
+     "\n",
+     "print('Done!')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "len(X_test)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "image_x = random.randint(0, len(X_train) - 1)  # randint is inclusive on both ends\n",
+     "plt.axis(\"off\")\n",
+     "imshow(X_train[image_x])\n",
+     "plt.show()\n",
+     "plt.axis(\"off\")\n",
+     "imshow(np.squeeze(y_train[image_x]))\n",
+     "plt.show()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "num_classes = 1"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "inputs = tf.keras.layers.Input(shape=(None, None, 3))"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "print(inputs.shape)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def encoder(inputs):\n",
+     "    c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(inputs)\n",
+     "    c1 = tf.keras.layers.Dropout(0.1)(c1)\n",
+     "    c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)\n",
+     "    p1 = tf.keras.layers.MaxPooling2D((2, 2))(c1)\n",
+     "\n",
+     "    c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)\n",
+     "    c2 = tf.keras.layers.Dropout(0.1)(c2)\n",
+     "    c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)\n",
+     "    p2 = tf.keras.layers.MaxPooling2D((2, 2))(c2)\n",
+     "\n",
+     "    c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)\n",
+     "    c3 = tf.keras.layers.Dropout(0.2)(c3)\n",
+     "    c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)\n",
+     "    p3 = tf.keras.layers.MaxPooling2D((2, 2))(c3)\n",
+     "\n",
+     "    c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)\n",
+     "    c4 = tf.keras.layers.Dropout(0.2)(c4)\n",
+     "    c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)\n",
+     "    p4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(c4)\n",
+     "\n",
+     "    c5 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)\n",
+     "    c5 = tf.keras.layers.Dropout(0.2)(c5)\n",
+     "    c5 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)\n",
+     "    p5 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(c5)\n",
+     "\n",
+     "    c6 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p5)\n",
+     "    c6 = tf.keras.layers.Dropout(0.3)(c6)\n",
+     "    c6 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)\n",
+     "\n",
+     "    u6 = tf.keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c6)\n",
+     "    c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)\n",
+     "    c6 = tf.keras.layers.Dropout(0.2)(c6)\n",
+     "    c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)\n",
+     "    return c6"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def decoder(c6):\n",
+     "    u7 = tf.keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)\n",
+     "    c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)\n",
+     "    c7 = tf.keras.layers.Add()([u7, c7])\n",
+     "\n",
+     "    u8 = tf.keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)\n",
+     "    c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)\n",
+     "    c8 = tf.keras.layers.Add()([u8, c8])\n",
+     "\n",
+     "    u9 = tf.keras.layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)\n",
+     "    c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)\n",
+     "    c9 = tf.keras.layers.Add()([u9, c9])\n",
+     "\n",
+     "    u10 = tf.keras.layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c9)\n",
+     "    c10 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u10)\n",
+     "    c10 = tf.keras.layers.Add()([u10, c10])\n",
+     "\n",
+     "    outputs = tf.keras.layers.Conv2D(num_classes, (1, 1), activation='sigmoid')(c10)\n",
+     "    return outputs\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "encoded = encoder(inputs)\n",
+     "outputs = decoder(encoded)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model = tf.keras.Model(inputs=[inputs], outputs=[outputs])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model.summary()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "tf.keras.utils.plot_model(model, \"model.png\", show_shapes=True)\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "callbacks = [\n",
+     "    tf.keras.callbacks.EarlyStopping(patience=15, monitor='val_loss'),\n",
+     "    tf.keras.callbacks.TensorBoard(log_dir='logs')]\n",
+     "\n",
+     "model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=16, epochs=10, callbacks=callbacks)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "loss = model.history.history['loss']\n",
+     "val_loss = model.history.history['val_loss']\n",
+     "\n",
+     "plt.figure()\n",
+     "plt.plot(loss, 'r', label='Training loss')\n",
+     "plt.plot(val_loss, 'bo', label='Validation loss')\n",
+     "plt.title('Training and Validation Loss')\n",
+     "plt.xlabel('Epoch')\n",
+     "plt.ylabel('Loss Value')\n",
+     "plt.ylim([0, 1])\n",
+     "plt.legend()\n",
+     "plt.show()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "accuracy = model.history.history['accuracy']\n",
+     "val_accuracy = model.history.history['val_accuracy']\n",
+     "\n",
+     "plt.figure()\n",
+     "plt.plot(accuracy, 'r', label='Training accuracy')\n",
+     "plt.plot(val_accuracy, 'bo', label='Validation accuracy')\n",
+     "plt.title('Training and Validation Accuracy')\n",
+     "plt.xlabel('Epoch')\n",
+     "plt.ylabel('Accuracy Value')\n",
+     "plt.ylim([0, 1])\n",
+     "plt.legend()\n",
+     "plt.show()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def display(display_list):\n",
+     "    plt.figure(figsize=(15, 15))\n",
+     "\n",
+     "    title = ['Input image', 'True mask', 'Predicted mask']\n",
+     "\n",
+     "    for i in range(len(display_list)):\n",
+     "        plt.subplot(1, len(display_list), i+1)\n",
+     "        plt.title(title[i])\n",
+     "        plt.imshow(tf.keras.utils.array_to_img(display_list[i]))\n",
+     "        plt.axis('off')\n",
+     "    plt.show()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "i = random.randint(0, len(X_test) - 1)\n",
+     "sample_image = X_test[i]\n",
+     "sample_mask = y_test[i]"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "predictions = model.predict(X_test)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "len(predictions[0])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "len(predictions)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "prediction = model.predict(sample_image[tf.newaxis, ...])[0]"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "predicted_mask = (prediction > 0.5).astype(np.uint8)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "display([sample_image, sample_mask, predicted_mask])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Building a segmentation (U-Net) model\n",
+     "\n",
+     "from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate\n",
+     "from tensorflow.keras.models import Model\n",
+     "\n",
+     "def unet_model(input_size=(128, 128, 3)):\n",
+     "    inputs = Input(input_size)\n",
+     "\n",
+     "    c1 = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)\n",
+     "    c1 = Conv2D(64, (3, 3), activation='relu', padding='same')(c1)\n",
+     "    p1 = MaxPooling2D((2, 2))(c1)\n",
+     "\n",
+     "    c2 = Conv2D(128, (3, 3), activation='relu', padding='same')(p1)\n",
+     "    c2 = Conv2D(128, (3, 3), activation='relu', padding='same')(c2)\n",
+     "    p2 = MaxPooling2D((2, 2))(c2)\n",
+     "\n",
+     "    c3 = Conv2D(256, (3, 3), activation='relu', padding='same')(p2)\n",
+     "    c3 = Conv2D(256, (3, 3), activation='relu', padding='same')(c3)\n",
+     "    p3 = MaxPooling2D((2, 2))(c3)\n",
+     "\n",
+     "    c4 = Conv2D(512, (3, 3), activation='relu', padding='same')(p3)\n",
+     "    c4 = Conv2D(512, (3, 3), activation='relu', padding='same')(c4)\n",
+     "    p4 = MaxPooling2D(pool_size=(2, 2))(c4)\n",
+     "\n",
+     "    c5 = Conv2D(1024, (3, 3), activation='relu', padding='same')(p4)\n",
+     "    c5 = Conv2D(1024, (3, 3), activation='relu', padding='same')(c5)\n",
+     "\n",
+     "    u6 = UpSampling2D(size=(2, 2))(c5)\n",
+     "    u6 = concatenate([u6, c4], axis=3)\n",
+     "    c6 = Conv2D(512, (3, 3), activation='relu', padding='same')(u6)\n",
+     "    c6 = Conv2D(512, (3, 3), activation='relu', padding='same')(c6)\n",
+     "\n",
+     "    u7 = UpSampling2D(size=(2, 2))(c6)\n",
+     "    u7 = concatenate([u7, c3], axis=3)\n",
+     "    c7 = Conv2D(256, (3, 3), activation='relu', padding='same')(u7)\n",
+     "    c7 = Conv2D(256, (3, 3), activation='relu', padding='same')(c7)\n",
+     "\n",
+     "    u8 = UpSampling2D(size=(2, 2))(c7)\n",
+     "    u8 = concatenate([u8, c2], axis=3)\n",
+     "    c8 = Conv2D(128, (3, 3), activation='relu', padding='same')(u8)\n",
+     "    c8 = Conv2D(128, (3, 3), activation='relu', padding='same')(c8)\n",
+     "\n",
+     "    u9 = UpSampling2D(size=(2, 2))(c8)\n",
+     "    u9 = concatenate([u9, c1], axis=3)\n",
+     "    c9 = Conv2D(64, (3, 3), activation='relu', padding='same')(u9)\n",
+     "    c9 = Conv2D(64, (3, 3), activation='relu', padding='same')(c9)\n",
+     "\n",
+     "    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)\n",
+     "\n",
+     "    model = Model(inputs=[inputs], outputs=[outputs])\n",
+     "\n",
+     "    return model\n",
+     "\n",
+     "model = unet_model()\n",
+     "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Training the segmentation model\n",
+     "history = model.fit(X, y, validation_split=0.1, batch_size=16, epochs=10, verbose=1)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Predicting the segmentation maps\n",
+     "preds_train = model.predict(X, verbose=1)\n",
+     "preds_test = model.predict(X_test, verbose=1)\n",
+     "\n",
+     "# Thresholding predictions\n",
+     "preds_train_t = (preds_train > 0.5).astype(np.uint8)\n",
+     "preds_test_t = (preds_test > 0.5).astype(np.uint8)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def plot_sample(X, y, preds, ix=None):\n",
+     "    if ix is None:\n",
+     "        ix = random.randint(0, len(X) - 1)\n",
+     "\n",
+     "    has_mask = y[ix].max() > 0\n",
+     "\n",
+     "    fig, ax = plt.subplots(1, 3, figsize=(20, 10))\n",
+     "    ax[0].imshow(X[ix])\n",
+     "    if has_mask:\n",
+     "        ax[0].contour(y[ix].squeeze(), colors='r', levels=[0.5])\n",
+     "    ax[0].set_title('Original Image')\n",
+     "\n",
+     "    ax[1].imshow(y[ix].squeeze())\n",
+     "    ax[1].set_title('True Mask')\n",
+     "\n",
+     "    ax[2].imshow(preds[ix].squeeze(), cmap='gray')\n",
+     "    if has_mask:\n",
+     "        ax[2].contour(y[ix].squeeze(), colors='r', levels=[0.5])\n",
+     "    ax[2].set_title('Predicted Mask')\n",
+     "\n",
+     "    plt.show()\n",
+     "\n",
+     "plot_sample(X, y, preds_train_t)\n"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3 (ipykernel)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.12.4"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 4
+ }
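
Two patterns in the notebook above deserve a note: per-instance masks are merged into a single ground-truth mask with np.maximum, and sigmoid outputs are binarized at 0.5. A self-contained sketch with dummy arrays standing in for the Kaggle data:

```python
import numpy as np

# Merge several single-nucleus masks into one binary mask per image,
# the same element-wise np.maximum pattern the training loop uses.
instance_masks = [np.random.rand(128, 128, 1) > 0.9 for _ in range(3)]  # dummy masks
merged = np.zeros((128, 128, 1), dtype=bool)
for m in instance_masks:
    merged = np.maximum(merged, m)

# Binarize a sigmoid prediction at 0.5, as the notebook does after model.predict.
prediction = np.random.rand(128, 128, 1)  # dummy stand-in for a model output
predicted_mask = (prediction > 0.5).astype(np.uint8)
print(merged.sum(), predicted_mask.sum())
```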
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: noshot
- Version: 12.0.0
+ Version: 13.0.0
  Summary: Support library for Artificial Intelligence, Machine Learning and Data Science tools
  Author: Tim Stan S
  License: MIT
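
The only metadata change is the version bump. After upgrading, the installed version can be confirmed from the standard library (the expected output assumes the 13.0.0 wheel is installed):

```python
from importlib.metadata import version

print(version("noshot"))  # '13.0.0' after `pip install -U noshot`
```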
@@ -0,0 +1,32 @@
+ noshot/__init__.py,sha256=000R40tii8lDFU8C1fBaD3SOnxD0PWRNWZU-km49YrU,21
+ noshot/main.py,sha256=Y92i47Aa0XctPccKQ-hoFlkRbxFmb1NWOf-OtPb_oVU,669
+ noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/1. DNN (Image Classification).ipynb,sha256=397KrOUOxsmKB5VZIAhG7QTxFdmLi7IV-CzsYyIIJJQ,8651
+ noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/2. DNN vs CNN.ipynb,sha256=yUHoexSUzeD1KbrhOIhPAg_Yd-WWLlMDuqBUmkdq70M,12138
+ noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/3. CNN (Object Detecrion).ipynb,sha256=FjeGzLcrwxfGnER5aNc523_otdU_wlsBYiVYvgBrkVk,6953
+ noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/4. FCN (Image Segmentaion).ipynb,sha256=6h4eV8A6tuGrB72iqSiI98qv80Eb_H_XoKdyIKM431I,8785
+ noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.1 DNN (Pytorch).ipynb,sha256=U6q8Uwqs830cZSgWKmk29nClnfGem0qc2Lkf6qT1lU0,6377
+ noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.2 DNN (Tensorflow).ipynb,sha256=PLYLcsA8tGxMGXb9e2rqQI6zPidC6UNayMx5JqKhOI8,3420
+ noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.3 DNN (Image Classification).ipynb,sha256=MknRySzMml400I2J8mrCteFj3A_5SDwzIZwZ-Vv1ksM,4980
+ noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.1 DNN vs CNN.ipynb,sha256=uBNutPKhF13bgGR_CauUiZXNQD3TQtdwKiUFwJ3_VeE,4552
+ noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.2 DNN vs CNN.ipynb,sha256=B2yx_oCM2xSW7o2Q3mHdclmhN8xfvDPXGC1bBpMe39Y,4331
+ noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/4. FCNN (Image Segmentation).ipynb,sha256=JpyMHakK6K6bMG4CMApcmLAqQi2bJYaws0nez9NRUS0,3519
+ noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Lab Excercise (Training DNN).ipynb,sha256=Csm7rQhN5SA4_1WcSZLYr7fGBDWWimHD12EaSSO001g,19658
+ noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Load-Images.ipynb,sha256=HWMP2WdrjdfDIQbLghERjamCphL-UUni1-8QbPPBx9I,14749
+ noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex1.ipynb,sha256=kheRjG7QuHB2g6IaHrkRFZAm7UAo07521KdbDAP8wmg,6400
+ noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex2.ipynb,sha256=rLSsgBpcspl2ym0m33PRfIknATKTrEde1FgjY27dJNE,5971
+ noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex3.ipynb,sha256=Wvk7hvqd7MSSUY37Z0vMp7nf0xA6FSkw5L9-H-N_nUs,543555
+ noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex4.ipynb,sha256=1t0V9Bq6ywXGl7gtmsNpe15c4p5uoaVC32pUyXUqR1M,5423
+ noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/DNN Ex No 1.ipynb,sha256=qpZN91XMM-W_Z5ePwjF-xZWMz4v8WK8kQersGCPUs54,11186
+ noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Ex No 1 Build in dataset.ipynb,sha256=9QfH0tR5HvjHZrSXApzD8qrgsUCCPqpmeDOtiYwRq9Q,3803
+ noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Exp1-Short-DL_ANN_ImageClassification.ipynb,sha256=UdqrWLEuJdPOWFGSagxexuCoXHSdGEHbQmDguJgrR-A,11128
+ noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/OR GATE .ipynb,sha256=U3eX1BlVvh1QGrCrZv3iOfcDR0Yx-85rSYLbEJjPDlI,296229
+ noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp02/Exp2-Short-DL_CNN_ImageClassification.ipynb,sha256=T0jOZPiAF5mAa4sdvq0YcUgUQfUESpR5_mzpIU9Tfug,21212
+ noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp03/DL-Ex3-RNN.ipynb,sha256=fk4c-bIsQvJvniqTLcedes-KJpVZuMdpiLgjt23228Y,16724
+ noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp04/Ex no 4.ipynb,sha256=m3ujDj3fLIP1j202GSC5uf8J_qdoKq8oO2M2eYKtCMY,17497
+ noshot/utils/__init__.py,sha256=QVrN1ZpzPXxZqDOqot5-t_ulFjZXVx7Cvr-Is9AK0po,110
+ noshot/utils/shell_utils.py,sha256=-XfgYlNQlULa_rRJ3vsfTns4m_jiueGEj396J_y0Gus,2611
+ noshot-13.0.0.dist-info/licenses/LICENSE.txt,sha256=fgCruaVm5cUjFGOeEoGIimT6nnUunBqcNZHpGzK8TSw,1086
+ noshot-13.0.0.dist-info/METADATA,sha256=F3R9Ym33T0l2R6qCXsqpBNsfqrJvtNkOyjM-P28GYMs,2574
+ noshot-13.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ noshot-13.0.0.dist-info/top_level.txt,sha256=UL-c0HffdRwohz-y9icY_rnY48pQDdxGcBsgyCKh2Q8,7
+ noshot-13.0.0.dist-info/RECORD,,
@@ -1,13 +0,0 @@
- noshot/__init__.py,sha256=000R40tii8lDFU8C1fBaD3SOnxD0PWRNWZU-km49YrU,21
- noshot/main.py,sha256=Y92i47Aa0XctPccKQ-hoFlkRbxFmb1NWOf-OtPb_oVU,669
- noshot/data/DLE FSD BDA/DLE/1. DNN (Image Classification).ipynb,sha256=397KrOUOxsmKB5VZIAhG7QTxFdmLi7IV-CzsYyIIJJQ,8651
- noshot/data/DLE FSD BDA/DLE/2. DNN vs CNN.ipynb,sha256=yUHoexSUzeD1KbrhOIhPAg_Yd-WWLlMDuqBUmkdq70M,12138
- noshot/data/DLE FSD BDA/DLE/3. CNN (Object Detecrion).ipynb,sha256=FjeGzLcrwxfGnER5aNc523_otdU_wlsBYiVYvgBrkVk,6953
- noshot/data/DLE FSD BDA/DLE/4. FCN (Image Segmentaion).ipynb,sha256=6h4eV8A6tuGrB72iqSiI98qv80Eb_H_XoKdyIKM431I,8785
- noshot/utils/__init__.py,sha256=QVrN1ZpzPXxZqDOqot5-t_ulFjZXVx7Cvr-Is9AK0po,110
- noshot/utils/shell_utils.py,sha256=-XfgYlNQlULa_rRJ3vsfTns4m_jiueGEj396J_y0Gus,2611
- noshot-12.0.0.dist-info/licenses/LICENSE.txt,sha256=fgCruaVm5cUjFGOeEoGIimT6nnUunBqcNZHpGzK8TSw,1086
- noshot-12.0.0.dist-info/METADATA,sha256=N2rDT6xfFJdZiqqoKM7iCRqwbeibXMjuthszOU-prvQ,2574
- noshot-12.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- noshot-12.0.0.dist-info/top_level.txt,sha256=UL-c0HffdRwohz-y9icY_rnY48pQDdxGcBsgyCKh2Q8,7
- noshot-12.0.0.dist-info/RECORD,,
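
Each RECORD line above follows the wheel spec: `path,sha256=<digest>,<size>`, where the digest is an unpadded URL-safe Base64 encoding of the file's SHA-256 hash. A short sketch for reproducing an entry locally (the file path is hypothetical):

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path):
    # path,sha256=<urlsafe-base64 SHA-256 digest, '=' padding stripped>,<size in bytes>
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{path},sha256={digest},{len(data)}"

print(record_entry("noshot/__init__.py"))  # hypothetical local checkout path
```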