noshot 11.0.0__py3-none-any.whl → 13.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/1. DNN (Image Classification).ipynb +389 -0
  2. noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/2. DNN vs CNN.ipynb +516 -0
  3. noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/3. CNN (Object Detecrion).ipynb +259 -0
  4. noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/4. FCN (Image Segmentaion).ipynb +274 -0
  5. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.1 DNN (Pytorch).ipynb +164 -0
  6. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.2 DNN (Tensorflow).ipynb +94 -0
  7. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.3 DNN (Image Classification).ipynb +134 -0
  8. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.1 DNN vs CNN.ipynb +127 -0
  9. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.2 DNN vs CNN.ipynb +123 -0
  10. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/4. FCNN (Image Segmentation).ipynb +108 -0
  11. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Lab Excercise (Training DNN).ipynb +646 -0
  12. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Load-Images.ipynb +553 -0
  13. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex1.ipynb +216 -0
  14. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex2.ipynb +195 -0
  15. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex3.ipynb +427 -0
  16. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex4.ipynb +186 -0
  17. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/DNN Ex No 1.ipynb +398 -0
  18. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Ex No 1 Build in dataset.ipynb +171 -0
  19. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Exp1-Short-DL_ANN_ImageClassification.ipynb +401 -0
  20. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/OR GATE .ipynb +8511 -0
  21. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp02/Exp2-Short-DL_CNN_ImageClassification.ipynb +737 -0
  22. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp03/DL-Ex3-RNN.ipynb +591 -0
  23. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp04/Ex no 4.ipynb +551 -0
  24. noshot/main.py +3 -3
  25. {noshot-11.0.0.dist-info → noshot-13.0.0.dist-info}/METADATA +1 -1
  26. noshot-13.0.0.dist-info/RECORD +32 -0
  27. noshot/data/ML TS XAI/ML/CNN(Image_for_Folders_5).ipynb +0 -201
  28. noshot/data/ML TS XAI/ML/CNN(Image_form_Folder_2).ipynb +0 -201
  29. noshot/data/ML TS XAI/ML/Json Codes/ML LAB CIA 2.ipynb +0 -409
  30. noshot/data/ML TS XAI/ML/ML 1/1. EDA-PCA (Balance Scale Dataset).ipynb +0 -147
  31. noshot/data/ML TS XAI/ML/ML 1/1. EDA-PCA (Rice Dataset).ipynb +0 -181
  32. noshot/data/ML TS XAI/ML/ML 1/10. HMM Veterbi.ipynb +0 -152
  33. noshot/data/ML TS XAI/ML/ML 1/2. KNN (Balance Scale Dataset).ipynb +0 -117
  34. noshot/data/ML TS XAI/ML/ML 1/2. KNN (Iris Dataset).ipynb +0 -156
  35. noshot/data/ML TS XAI/ML/ML 1/2. KNN (Sobar-72 Dataset).ipynb +0 -215
  36. noshot/data/ML TS XAI/ML/ML 1/3. LDA (Balance Scale Dataset).ipynb +0 -78
  37. noshot/data/ML TS XAI/ML/ML 1/3. LDA (NPHA Doctor Visits Dataset).ipynb +0 -114
  38. noshot/data/ML TS XAI/ML/ML 1/4. Linear Regression (Machine Dataset).ipynb +0 -115
  39. noshot/data/ML TS XAI/ML/ML 1/4. Linear Regression (Real Estate Dataset).ipynb +0 -146
  40. noshot/data/ML TS XAI/ML/ML 1/5. Logistic Regression (Magic04 Dataset).ipynb +0 -130
  41. noshot/data/ML TS XAI/ML/ML 1/5. Logistic Regression (Wine Dataset).ipynb +0 -112
  42. noshot/data/ML TS XAI/ML/ML 1/6. Naive Bayes Classifier (Agaricus Lepiota Dataset).ipynb +0 -118
  43. noshot/data/ML TS XAI/ML/ML 1/6. Naive Bayes Classifier (Wine Dataset).ipynb +0 -89
  44. noshot/data/ML TS XAI/ML/ML 1/7. SVM (Rice Dataset).ipynb +0 -120
  45. noshot/data/ML TS XAI/ML/ML 1/8. FeedForward NN (Sobar72 Dataset).ipynb +0 -262
  46. noshot/data/ML TS XAI/ML/ML 1/9. CNN (Cifar10 Dataset).ipynb +0 -156
  47. noshot/data/ML TS XAI/ML/ML 2/1. PCA.ipynb +0 -162
  48. noshot/data/ML TS XAI/ML/ML 2/10. CNN.ipynb +0 -100
  49. noshot/data/ML TS XAI/ML/ML 2/11. HMM.ipynb +0 -336
  50. noshot/data/ML TS XAI/ML/ML 2/2. KNN.ipynb +0 -149
  51. noshot/data/ML TS XAI/ML/ML 2/3. LDA.ipynb +0 -132
  52. noshot/data/ML TS XAI/ML/ML 2/4. Linear Regression.ipynb +0 -86
  53. noshot/data/ML TS XAI/ML/ML 2/5. Logistic Regression.ipynb +0 -115
  54. noshot/data/ML TS XAI/ML/ML 2/6. Naive Bayes (Titanic).ipynb +0 -196
  55. noshot/data/ML TS XAI/ML/ML 2/6. Naive Bayes (Wine).ipynb +0 -98
  56. noshot/data/ML TS XAI/ML/ML 2/7. SVM Linear.ipynb +0 -109
  57. noshot/data/ML TS XAI/ML/ML 2/8. SVM Non-Linear.ipynb +0 -195
  58. noshot/data/ML TS XAI/ML/ML 2/9. FNN With Regularization.ipynb +0 -189
  59. noshot/data/ML TS XAI/ML/ML 2/9. FNN Without Regularization.ipynb +0 -197
  60. noshot/data/ML TS XAI/ML/ML 2/All in One Lab CIA 1 Q.ipynb +0 -1087
  61. noshot/data/ML TS XAI/ML/ML 3 (Latest)/1. PCA EDA.ipynb +0 -274
  62. noshot/data/ML TS XAI/ML/ML 3 (Latest)/10. CNN.ipynb +0 -170
  63. noshot/data/ML TS XAI/ML/ML 3 (Latest)/11. HMM 2.ipynb +0 -1087
  64. noshot/data/ML TS XAI/ML/ML 3 (Latest)/11. HMM 3.ipynb +0 -178
  65. noshot/data/ML TS XAI/ML/ML 3 (Latest)/11. HMM 4.ipynb +0 -185
  66. noshot/data/ML TS XAI/ML/ML 3 (Latest)/11. HMM.ipynb +0 -106
  67. noshot/data/ML TS XAI/ML/ML 3 (Latest)/2. KNN.ipynb +0 -177
  68. noshot/data/ML TS XAI/ML/ML 3 (Latest)/3. LDA.ipynb +0 -195
  69. noshot/data/ML TS XAI/ML/ML 3 (Latest)/4. Linear Regression.ipynb +0 -267
  70. noshot/data/ML TS XAI/ML/ML 3 (Latest)/5. Logistic Regression.ipynb +0 -104
  71. noshot/data/ML TS XAI/ML/ML 3 (Latest)/6. Bayesian Classifier.ipynb +0 -109
  72. noshot/data/ML TS XAI/ML/ML 3 (Latest)/7. SVM.ipynb +0 -220
  73. noshot/data/ML TS XAI/ML/ML 3 (Latest)/8. MLP.ipynb +0 -99
  74. noshot/data/ML TS XAI/ML/ML 3 (Latest)/9. Ridge - Lasso.ipynb +0 -211
  75. noshot/data/ML TS XAI/ML/ML 3 (Latest)/9. Ridge Lasso 2.ipynb +0 -99
  76. noshot/data/ML TS XAI/ML/ML 3 (Latest)/Image Load Example.ipynb +0 -118
  77. noshot/data/ML TS XAI/ML/ML 3 (Latest)/Updated_Untitled.ipynb +0 -603
  78. noshot/data/ML TS XAI/ML/ML Lab AllinOne.ipynb +0 -961
  79. noshot/data/ML TS XAI/ML/ML Lab H Sec/1. Iris Dataset (Softmax vs Sigmoid).ipynb +0 -231
  80. noshot/data/ML TS XAI/ML/ML Lab H Sec/2. Student Dataset (Overfit vs Regularized).ipynb +0 -269
  81. noshot/data/ML TS XAI/ML/ML Lab H Sec/3. Insurance Target Categorical (Overfit vs Regularized).ipynb +0 -274
  82. noshot/data/ML TS XAI/ML/ML Lab H Sec/3. Insurance Target Numerical (Overfit vs Regularized).ipynb +0 -263
  83. noshot/data/ML TS XAI/ML/ML Lab H Sec/4. Smart House System HMM.ipynb +0 -198
  84. noshot/data/ML TS XAI/ML/ML Lab H Sec/5. Fraud Detection System HMM.ipynb +0 -201
  85. noshot/data/ML TS XAI/ML/ML Lab H Sec/insurance.csv +0 -1339
  86. noshot/data/ML TS XAI/ML/ML Lab H Sec/iris1.data +0 -151
  87. noshot/data/ML TS XAI/ML/ML Lab H Sec/student-mat.csv +0 -396
  88. noshot/data/ML TS XAI/ML/ML Lab H Sec/student-por.csv +0 -650
  89. noshot/data/ML TS XAI/ML/Rolls Royce AllinOne.ipynb +0 -691
  90. noshot-11.0.0.dist-info/RECORD +0 -72
  91. {noshot-11.0.0.dist-info → noshot-13.0.0.dist-info}/WHEEL +0 -0
  92. {noshot-11.0.0.dist-info → noshot-13.0.0.dist-info}/licenses/LICENSE.txt +0 -0
  93. {noshot-11.0.0.dist-info → noshot-13.0.0.dist-info}/top_level.txt +0 -0
noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/3. CNN (Object Detecrion).ipynb
@@ -0,0 +1,259 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "jYBOYvgJS3Gn",
+ "outputId": "0876b799-d18a-4968-88e8-7e6b4ce3dcf2"
+ },
+ "outputs": [],
+ "source": [
+ "from google.colab import drive\n",
+ "drive.mount('/content/drive')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "rAQLygx6XQSM",
+ "outputId": "22d76daf-4617-4801-beda-3a41aa19849b"
+ },
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "gpus = tf.config.list_physical_devices('GPU')\n",
+ "if gpus:\n",
+ " try:\n",
+ " tf.config.set_visible_devices(gpus[0], 'GPU')\n",
+ " tf.config.experimental.set_memory_growth(gpus[0], True)\n",
+ " print(\"Connected to GPU:\", gpus[0])\n",
+ " except RuntimeError as e:\n",
+ " print(e)\n",
+ "else:\n",
+ " print(\"No GPU detected\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Fg35-trEUVVO"
+ },
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "from tensorflow.keras.preprocessing import image_dataset_from_directory\n",
+ "from tensorflow.keras.applications import VGG16\n",
+ "from tensorflow.keras.layers import Dense, Flatten, Input\n",
+ "from tensorflow.keras.models import Model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "_ktocPHlXiYf",
+ "outputId": "80949b79-40b2-47b5-e06c-5e3df88be80b"
+ },
+ "outputs": [],
+ "source": [
+ "train_ds=image_dataset_from_directory(\n",
+ " '/content/drive/MyDrive/sem 7/Lab/DL_Lab/Bean_Dataset',\n",
+ " subset='training',\n",
+ " validation_split=0.2,\n",
+ " seed=123,\n",
+ " image_size=(224,224),\n",
+ " batch_size=32\n",
+ ")\n",
+ "\n",
+ "val_ds=image_dataset_from_directory(\n",
+ " '/content/drive/MyDrive/sem 7/Lab/DL_Lab/Bean_Dataset',\n",
+ " subset='validation',\n",
+ " validation_split=0.2,\n",
+ " seed=123,\n",
+ " image_size=(224,224),\n",
+ " batch_size=32\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "GNxlh9pKYj_e"
+ },
+ "outputs": [],
+ "source": [
+ "def preprocess(image, label):\n",
+ " image = tf.cast(image, tf.float32) / 255.0\n",
+ " bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32)\n",
+ "\n",
+ " return image, {\n",
+ " \"class_output\": tf.one_hot(label, depth=3),\n",
+ " \"bbox_output\": bbox\n",
+ " }\n",
+ "\n",
+ "train_ds = train_ds.map(preprocess).prefetch(tf.data.AUTOTUNE)\n",
+ "val_ds = val_ds.map(preprocess).prefetch(tf.data.AUTOTUNE)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "bdOD-gfYZ-v_"
+ },
+ "outputs": [],
+ "source": [
+ "base_model=VGG16(\n",
+ " weights='imagenet',\n",
+ " include_top=False,\n",
+ " input_tensor=Input(shape=(224,224,3))\n",
+ ")\n",
+ "\n",
+ "for layer in base_model.layers:\n",
+ " layer.trainable=False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "yvLkjlSWbT_G"
+ },
+ "outputs": [],
+ "source": [
+ "x=Flatten()(base_model.output)\n",
+ "\n",
+ "class_output=Dense(3,activation='softmax',name='class_output')(x)\n",
+ "\n",
+ "bbox_output=Dense(4,activation='linear',name='bbox_output')(x)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "id": "lt3a7yFkb6oL",
+ "outputId": "671b98a2-0cbd-464b-c64d-f5d26b6afb74"
+ },
+ "outputs": [],
+ "source": [
+ "model=Model(inputs=base_model.input,outputs=[class_output,bbox_output])\n",
+ "model.compile(\n",
+ " optimizer=\"adam\",\n",
+ " loss={\"class_output\": \"categorical_crossentropy\", \"bbox_output\": \"mse\"},\n",
+ " metrics={\"class_output\": \"accuracy\", \"bbox_output\": \"mse\"}\n",
+ ")\n",
+ "model.summary()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "j62lCsGaTLbj",
+ "outputId": "88c8b743-1013-456a-ccbb-75a80f1ec034"
+ },
+ "outputs": [],
+ "source": [
+ "history = model.fit(\n",
+ " train_ds,\n",
+ " validation_data=val_ds,\n",
+ " epochs=5\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 423
+ },
+ "id": "hWUAuWX0TN_5",
+ "outputId": "272c9521-23a6-4e92-e623-cc1355c8df8f"
+ },
+ "outputs": [],
+ "source": [
+ "import cv2\n",
+ "\n",
+ "def show_prediction(img_path):\n",
+ " img = tf.keras.utils.load_img(img_path, target_size=(224, 224))\n",
+ " img_array = tf.keras.utils.img_to_array(img) / 255.0\n",
+ " img_input = np.expand_dims(img_array, axis=0)\n",
+ "\n",
+ " pred_class, pred_bbox = model.predict(img_input)\n",
+ "\n",
+ " # Get predicted class\n",
+ " class_idx = np.argmax(pred_class[0])\n",
+ " class_names = [\"class1\", \"class2\", \"class3\"]\n",
+ " label = class_names[class_idx]\n",
+ " score = np.max(pred_class[0])\n",
+ "\n",
+ " # Scale bbox back to image size\n",
+ " xmin, ymin, xmax, ymax = pred_bbox[0]\n",
+ " xmin, xmax = int(xmin*224), int(xmax*224)\n",
+ " ymin, ymax = int(ymin*224), int(ymax*224)\n",
+ "\n",
+ " img_disp = np.array(img_array*255, dtype=np.uint8)\n",
+ " img_disp = cv2.rectangle(img_disp, (xmin, ymin), (xmax, ymax), (255,0,0), 2)\n",
+ " cv2.putText(img_disp, f\"{label} ({score:.2f})\", (xmin, ymin-10),\n",
+ " cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,0,0), 2)\n",
+ "\n",
+ " plt.imshow(img_disp.astype(\"uint8\"))\n",
+ " plt.axis(\"off\")\n",
+ " plt.show()\n",
+ "\n",
+ "show_prediction(\"/content/drive/MyDrive/sem 7/Lab/DL_Lab/Bean_Dataset/angular_leaf_spot/angular_leaf_spot_06.jpg\")\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
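
Note: in the object-detection notebook above, the bbox_output head is trained against a constant dummy box [0.0, 0.0, 1.0, 1.0], so the regression head can only learn to predict the full frame. With real annotations, the two losses would normally be balanced against each other via Keras loss_weights. A minimal sketch, rebuilding the notebook's two-headed model; the 1.0/0.5 weights are illustrative assumptions, not values from the package:

from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Dense, Flatten, Input
from tensorflow.keras.models import Model

# Frozen VGG16 backbone with a classification head and a box-regression head,
# mirroring the notebook's architecture.
base = VGG16(weights='imagenet', include_top=False,
             input_tensor=Input(shape=(224, 224, 3)))
for layer in base.layers:
    layer.trainable = False
x = Flatten()(base.output)
class_out = Dense(3, activation='softmax', name='class_output')(x)
bbox_out = Dense(4, activation='linear', name='bbox_output')(x)
model = Model(inputs=base.input, outputs=[class_out, bbox_out])

# loss_weights trades classification accuracy against box error; the
# 1.0 / 0.5 split is an assumed starting point, not from the source.
model.compile(
    optimizer='adam',
    loss={'class_output': 'categorical_crossentropy', 'bbox_output': 'mse'},
    loss_weights={'class_output': 1.0, 'bbox_output': 0.5},
    metrics={'class_output': 'accuracy'},
)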
noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/4. FCN (Image Segmentaion).ipynb
@@ -0,0 +1,274 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0a833f48-b878-49c9-855b-897fe220d717",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e367e276-98af-4f80-9477-d0b94bfaaeb2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "af5517b9-0250-4268-92cb-a51f5d18415d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+ "\n",
+ "# Paths to images and masks directories\n",
+ "image_dir = \"C:/Users/Welcome/Downloads/Banana FCN/Images\"\n",
+ "mask_dir = \"C:/Users/Welcome/Downloads/Banana FCN/Mask\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "782cd1fe-749c-4640-92de-e50baa2fe905",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "image_datagen = ImageDataGenerator(rescale=1./255)\n",
+ "mask_datagen = ImageDataGenerator(rescale=1./255)\n",
+ "\n",
+ "image_generator = image_datagen.flow_from_directory(\n",
+ " image_dir,\n",
+ " class_mode=None,\n",
+ " color_mode='rgb',\n",
+ " target_size=(128, 128),\n",
+ " batch_size=32,\n",
+ " seed=42\n",
+ ")\n",
+ "\n",
+ "mask_generator = mask_datagen.flow_from_directory(\n",
+ " mask_dir,\n",
+ " class_mode=None,\n",
+ " color_mode='grayscale',\n",
+ " target_size=(128, 128),\n",
+ " batch_size=32,\n",
+ " seed=42\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bd887bd8-9595-411c-976b-495983003c08",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "train_generator = zip(image_generator, mask_generator)\n",
+ "\n",
+ "\n",
+ "from tensorflow.keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, Input\n",
+ "from tensorflow.keras.models import Model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2df2b32f-10ae-4f29-9996-7f8bbe20d6a8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def build_fcnn():\n",
+ " inputs = Input((128, 128, 3))\n",
+ "\n",
+ " # Encoder\n",
+ " conv1 = Conv2D(128, (3, 3), activation='relu', padding='same')(inputs)\n",
+ " pool1 = MaxPooling2D((2, 2))(conv1)\n",
+ "\n",
+ " conv2 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool1)\n",
+ " pool2 = MaxPooling2D((2, 2))(conv2)\n",
+ "\n",
+ " # Decoder\n",
+ " conv3 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool2)\n",
+ " up1 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv3)\n",
+ "\n",
+ " conv4 = Conv2D(128, (3, 3), activation='relu', padding='same')(up1)\n",
+ " up2 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv4)\n",
+ "\n",
+ " outputs = Conv2D(1, (1, 1), activation='sigmoid', padding='same')(up2)\n",
+ "\n",
+ " model = Model(inputs, outputs)\n",
+ " return model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "68115a30-7a5f-4b1a-96eb-020b3a96a25a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = build_fcnn()\n",
+ "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n",
+ "model.summary()\n",
+ "\n",
+ "# Train the FCNN model\n",
+ "def combined_generator(image_gen, mask_gen):\n",
+ " while True: # Keep yielding data indefinitely\n",
+ " img_batch = next(image_gen)\n",
+ " mask_batch = next(mask_gen)\n",
+ " yield img_batch, mask_batch # Keras expects (input, target)\n",
+ "# Fit the model with the custom generator\n",
+ "train_generator = combined_generator(image_generator, mask_generator)\n",
+ "model.fit(train_generator, steps_per_epoch=len(image_generator), epochs=50)\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "\n",
+ "# Sample image for prediction\n",
+ "sample_image = image_generator[0][0]\n",
+ "predicted_mask = model.predict(np.expand_dims(sample_image, axis=0))[0]\n",
+ "\n",
+ "# Display the original image and predicted mask\n",
+ "plt.figure(figsize=(10, 5))\n",
+ "\n",
+ "plt.subplot(1, 2, 1)\n",
+ "plt.title(\"Original Image\")\n",
+ "plt.imshow(sample_image)\n",
+ "\n",
+ "plt.subplot(1, 2, 2)\n",
+ "plt.title(\"Predicted Mask\")\n",
+ "plt.imshow(predicted_mask.squeeze(), cmap='gray')\n",
+ "\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "31aee94d-c851-4c10-ac38-30ba29aa71a3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+ "\n",
+ "# Paths to images and masks directories\n",
+ "image_dir = \"C:/Users/Welcome/Downloads/Banana FCN/Images/Images\"\n",
+ "mask_dir = \"C:/Users/Welcome/Downloads/Banana FCN/Mask/Mask\"\n",
+ "\n",
+ "# Image and mask data generators\n",
+ "image_datagen = ImageDataGenerator(rescale=1./255)\n",
+ "mask_datagen = ImageDataGenerator(rescale=1./255)\n",
+ "\n",
+ "image_generator = image_datagen.flow_from_directory(\n",
+ " image_dir,\n",
+ " class_mode=None,\n",
+ " color_mode='rgb',\n",
+ " target_size=(128, 128),\n",
+ " batch_size=32,\n",
+ " seed=42\n",
+ ")\n",
+ "\n",
+ "mask_generator = mask_datagen.flow_from_directory(\n",
+ " mask_dir,\n",
+ " class_mode=None,\n",
+ " color_mode='grayscale',\n",
+ " target_size=(128, 128),\n",
+ " batch_size=32,\n",
+ " seed=42\n",
+ ")\n",
+ "\n",
+ "# Combine generators into one which yields image and mask\n",
+ "train_generator = zip(image_generator, mask_generator)\n",
+ "\n",
+ "\n",
+ "from tensorflow.keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, Input\n",
+ "from tensorflow.keras.models import Model\n",
+ "\n",
+ "def build_fcnn():\n",
+ " inputs = Input((128, 128, 3))\n",
+ "\n",
+ " # Encoder\n",
+ " conv1 = Conv2D(128, (3, 3), activation='relu', padding='same')(inputs)\n",
+ " pool1 = MaxPooling2D((2, 2))(conv1)\n",
+ "\n",
+ " conv2 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool1)\n",
+ " pool2 = MaxPooling2D((2, 2))(conv2)\n",
+ "\n",
+ " # Decoder\n",
+ " conv3 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool2)\n",
+ " up1 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv3)\n",
+ "\n",
+ " conv4 = Conv2D(128, (3, 3), activation='relu', padding='same')(up1)\n",
+ " up2 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv4)\n",
+ "\n",
+ " outputs = Conv2D(1, (1, 1), activation='sigmoid', padding='same')(up2)\n",
+ "\n",
+ " model = Model(inputs, outputs)\n",
+ " return model\n",
+ "\n",
+ "model = build_fcnn()\n",
+ "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n",
+ "model.summary()\n",
+ "\n",
+ "# Train the FCNN model\n",
+ "def combined_generator(image_gen, mask_gen):\n",
+ " while True: # Keep yielding data indefinitely\n",
+ " img_batch = next(image_gen)\n",
+ " mask_batch = next(mask_gen)\n",
+ " yield img_batch, mask_batch # Keras expects (input, target)\n",
+ "# Fit the model with the custom generator\n",
+ "train_generator = combined_generator(image_generator, mask_generator)\n",
+ "model.fit(train_generator, steps_per_epoch=len(image_generator), epochs=50)\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "\n",
+ "# Sample image for prediction\n",
+ "sample_image = image_generator[0][0]\n",
+ "predicted_mask = model.predict(np.expand_dims(sample_image, axis=0))[0]\n",
+ "\n",
+ "# Display the original image and predicted mask\n",
+ "plt.figure(figsize=(10, 5))\n",
+ "\n",
+ "plt.subplot(1, 2, 1)\n",
+ "plt.title(\"Original Image\")\n",
+ "plt.imshow(sample_image)\n",
+ "\n",
+ "plt.subplot(1, 2, 2)\n",
+ "plt.title(\"Predicted Mask\")\n",
+ "plt.imshow(predicted_mask.squeeze(), cmap='gray')\n",
+ "\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
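
Note: the segmentation notebook pairs images with masks by giving both ImageDataGenerator streams the same seed=42, then wraps them in combined_generator because model.fit cannot consume a plain zip of Keras iterators. A rough tf.data equivalent, sketched under the assumption that both directories hold identically named files so a shared shuffle seed keeps the pairs aligned:

import tensorflow as tf

# The notebook's directories (local Windows paths from the source).
image_dir = "C:/Users/Welcome/Downloads/Banana FCN/Images/Images"
mask_dir = "C:/Users/Welcome/Downloads/Banana FCN/Mask/Mask"

# labels=None yields bare image batches; the shared seed shuffles both
# file lists the same way, the same pairing assumption the notebook makes.
images = tf.keras.utils.image_dataset_from_directory(
    image_dir, labels=None, image_size=(128, 128), batch_size=32,
    shuffle=True, seed=42)
masks = tf.keras.utils.image_dataset_from_directory(
    mask_dir, labels=None, color_mode='grayscale', image_size=(128, 128),
    batch_size=32, shuffle=True, seed=42)

# Rescale to [0, 1] and emit (input, target) tuples; a zipped tf.data
# pipeline is finite per epoch, so model.fit needs no steps_per_epoch.
train_ds = tf.data.Dataset.zip((images, masks)).map(
    lambda img, msk: (img / 255.0, msk / 255.0)).prefetch(tf.data.AUTOTUNE)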
noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.1 DNN (Pytorch).ipynb
@@ -0,0 +1,164 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ab870464-a374-4292-9b59-0fe123e478df",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import numpy as np\n",
+ "from PIL import Image\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn.metrics import classification_report, confusion_matrix\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.optim as optim\n",
+ "from torch.utils.data import Dataset, DataLoader\n",
+ "import torchvision.transforms as transforms\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "dataset_path = \"<filepath>\"\n",
+ "IMG_SIZE = 128\n",
+ "BATCH_SIZE = 32\n",
+ "EPOCHS = 10\n",
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+ "\n",
+ "class PetDataset(Dataset):\n",
+ " def __init__(self, images, labels, transform=None):\n",
+ " self.images = images\n",
+ " self.labels = labels\n",
+ " self.transform = transform\n",
+ " def __len__(self):\n",
+ " return len(self.images)\n",
+ " def __getitem__(self, idx):\n",
+ " img = self.images[idx]\n",
+ " if self.transform:\n",
+ " img = self.transform(img)\n",
+ " label = self.labels[idx]\n",
+ " return img, label\n",
+ "\n",
+ "def load_images(folder_path, image_size=IMG_SIZE):\n",
+ " images, labels, class_names = [], [], sorted(os.listdir(folder_path))\n",
+ " for label, class_name in enumerate(class_names):\n",
+ " class_dir = os.path.join(folder_path, class_name)\n",
+ " for file in os.listdir(class_dir):\n",
+ " img = Image.open(os.path.join(class_dir, file)).convert('RGB').resize((image_size,image_size))\n",
+ " images.append(np.array(img)/255.0)\n",
+ " labels.append(label)\n",
+ " return np.array(images, dtype=np.float32), np.array(labels), class_names\n",
+ "\n",
+ "X, y, class_names = load_images(dataset_path)\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
+ "transform = transforms.Compose([transforms.ToTensor()])\n",
+ "train_dataset = PetDataset(X_train, y_train, transform=transform)\n",
+ "test_dataset = PetDataset(X_test, y_test, transform=transform)\n",
+ "train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n",
+ "test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)\n",
+ "\n",
+ "class SimpleCNN(nn.Module):\n",
+ " def __init__(self):\n",
+ " super().__init__()\n",
+ " self.conv = nn.Sequential(\n",
+ " nn.Conv2d(3,32,3,padding=1), nn.ReLU(), nn.MaxPool2d(2),\n",
+ " nn.Conv2d(32,64,3,padding=1), nn.ReLU(), nn.MaxPool2d(2)\n",
+ " )\n",
+ " self.fc = nn.Sequential(\n",
+ " nn.Flatten(),\n",
+ " nn.Linear(64*32*32,128), nn.ReLU(),\n",
+ " nn.Linear(128,len(class_names))\n",
+ " )\n",
+ " def forward(self,x):\n",
+ " return self.fc(self.conv(x))\n",
+ "\n",
+ "model = SimpleCNN().to(device)\n",
+ "criterion = nn.CrossEntropyLoss()\n",
+ "optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
+ "\n",
+ "train_losses, val_losses, train_acc, val_acc = [], [], [], []\n",
+ "\n",
+ "for epoch in range(EPOCHS):\n",
+ " model.train()\n",
+ " running_loss, correct, total = 0,0,0\n",
+ " for imgs, labels in train_loader:\n",
+ " imgs, labels = imgs.to(device), labels.to(device)\n",
+ " optimizer.zero_grad()\n",
+ " outputs = model(imgs)\n",
+ " loss = criterion(outputs, labels)\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ " running_loss += loss.item()\n",
+ " _, preds = torch.max(outputs,1)\n",
+ " correct += (preds==labels).sum().item()\n",
+ " total += labels.size(0)\n",
+ " train_losses.append(running_loss/len(train_loader))\n",
+ " train_acc.append(correct/total)\n",
+ "\n",
+ " model.eval()\n",
+ " val_loss, correct, total = 0,0,0\n",
+ " with torch.no_grad():\n",
+ " for imgs, labels in test_loader:\n",
+ " imgs, labels = imgs.to(device), labels.to(device)\n",
+ " outputs = model(imgs)\n",
+ " loss = criterion(outputs, labels)\n",
+ " val_loss += loss.item()\n",
+ " _, preds = torch.max(outputs,1)\n",
+ " correct += (preds==labels).sum().item()\n",
+ " total += labels.size(0)\n",
+ " val_losses.append(val_loss/len(test_loader))\n",
+ " val_acc.append(correct/total)\n",
+ " print(f\"Epoch {epoch+1}/{EPOCHS} - Train Acc: {train_acc[-1]:.3f}, Val Acc: {val_acc[-1]:.3f}\")\n",
+ "\n",
+ "y_true, y_pred = [], []\n",
+ "model.eval()\n",
+ "with torch.no_grad():\n",
+ " for imgs, labels in test_loader:\n",
+ " imgs, labels = imgs.to(device), labels.to(device)\n",
+ " outputs = model(imgs)\n",
+ " preds = torch.argmax(outputs,1)\n",
+ " y_true.extend(labels.cpu().numpy())\n",
+ " y_pred.extend(preds.cpu().numpy())\n",
+ "\n",
+ "print(\"Classification Report:\")\n",
+ "print(classification_report(y_true, y_pred, target_names=class_names))\n",
+ "print(\"Confusion Matrix:\")\n",
+ "print(confusion_matrix(y_true, y_pred))\n",
+ "\n",
+ "plt.figure(figsize=(12,4))\n",
+ "plt.subplot(1,2,1)\n",
+ "plt.plot(train_acc,label='Train Accuracy')\n",
+ "plt.plot(val_acc,label='Val Accuracy')\n",
+ "plt.title('Accuracy')\n",
+ "plt.legend()\n",
+ "plt.subplot(1,2,2)\n",
+ "plt.plot(train_losses,label='Train Loss')\n",
+ "plt.plot(val_losses,label='Val Loss')\n",
+ "plt.title('Loss')\n",
+ "plt.legend()\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
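
Note: the PyTorch notebook above stops at aggregate metrics and plots. A minimal single-image inference sketch, assuming the trained SimpleCNN model and class_names from the notebook; the image path is a placeholder:

import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image

def predict_image(path, model, class_names, image_size=128):
    # Same preprocessing as PetDataset: RGB, resize, scale to [0, 1].
    img = Image.open(path).convert('RGB').resize((image_size, image_size))
    arr = np.array(img, dtype=np.float32) / 255.0
    # ToTensor converts an HWC float array to CHW without rescaling again.
    tensor = transforms.ToTensor()(arr).unsqueeze(0)
    device = next(model.parameters()).device
    model.eval()
    with torch.no_grad():
        probs = torch.softmax(model(tensor.to(device)), dim=1)[0]
    idx = int(torch.argmax(probs))
    return class_names[idx], float(probs[idx])

# Usage, with a placeholder path:
# label, confidence = predict_image("<filepath>/example.jpg", model, class_names)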