noshot-11.0.0-py3-none-any.whl → noshot-12.0.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (74)
  1. noshot/data/DLE FSD BDA/DLE/1. DNN (Image Classification).ipynb +389 -0
  2. noshot/data/DLE FSD BDA/DLE/2. DNN vs CNN.ipynb +516 -0
  3. noshot/data/DLE FSD BDA/DLE/3. CNN (Object Detecrion).ipynb +259 -0
  4. noshot/data/DLE FSD BDA/DLE/4. FCN (Image Segmentaion).ipynb +274 -0
  5. noshot/main.py +3 -3
  6. {noshot-11.0.0.dist-info → noshot-12.0.0.dist-info}/METADATA +1 -1
  7. noshot-12.0.0.dist-info/RECORD +13 -0
  8. noshot/data/ML TS XAI/ML/CNN(Image_for_Folders_5).ipynb +0 -201
  9. noshot/data/ML TS XAI/ML/CNN(Image_form_Folder_2).ipynb +0 -201
  10. noshot/data/ML TS XAI/ML/Json Codes/ML LAB CIA 2.ipynb +0 -409
  11. noshot/data/ML TS XAI/ML/ML 1/1. EDA-PCA (Balance Scale Dataset).ipynb +0 -147
  12. noshot/data/ML TS XAI/ML/ML 1/1. EDA-PCA (Rice Dataset).ipynb +0 -181
  13. noshot/data/ML TS XAI/ML/ML 1/10. HMM Veterbi.ipynb +0 -152
  14. noshot/data/ML TS XAI/ML/ML 1/2. KNN (Balance Scale Dataset).ipynb +0 -117
  15. noshot/data/ML TS XAI/ML/ML 1/2. KNN (Iris Dataset).ipynb +0 -156
  16. noshot/data/ML TS XAI/ML/ML 1/2. KNN (Sobar-72 Dataset).ipynb +0 -215
  17. noshot/data/ML TS XAI/ML/ML 1/3. LDA (Balance Scale Dataset).ipynb +0 -78
  18. noshot/data/ML TS XAI/ML/ML 1/3. LDA (NPHA Doctor Visits Dataset).ipynb +0 -114
  19. noshot/data/ML TS XAI/ML/ML 1/4. Linear Regression (Machine Dataset).ipynb +0 -115
  20. noshot/data/ML TS XAI/ML/ML 1/4. Linear Regression (Real Estate Dataset).ipynb +0 -146
  21. noshot/data/ML TS XAI/ML/ML 1/5. Logistic Regression (Magic04 Dataset).ipynb +0 -130
  22. noshot/data/ML TS XAI/ML/ML 1/5. Logistic Regression (Wine Dataset).ipynb +0 -112
  23. noshot/data/ML TS XAI/ML/ML 1/6. Naive Bayes Classifier (Agaricus Lepiota Dataset).ipynb +0 -118
  24. noshot/data/ML TS XAI/ML/ML 1/6. Naive Bayes Classifier (Wine Dataset).ipynb +0 -89
  25. noshot/data/ML TS XAI/ML/ML 1/7. SVM (Rice Dataset).ipynb +0 -120
  26. noshot/data/ML TS XAI/ML/ML 1/8. FeedForward NN (Sobar72 Dataset).ipynb +0 -262
  27. noshot/data/ML TS XAI/ML/ML 1/9. CNN (Cifar10 Dataset).ipynb +0 -156
  28. noshot/data/ML TS XAI/ML/ML 2/1. PCA.ipynb +0 -162
  29. noshot/data/ML TS XAI/ML/ML 2/10. CNN.ipynb +0 -100
  30. noshot/data/ML TS XAI/ML/ML 2/11. HMM.ipynb +0 -336
  31. noshot/data/ML TS XAI/ML/ML 2/2. KNN.ipynb +0 -149
  32. noshot/data/ML TS XAI/ML/ML 2/3. LDA.ipynb +0 -132
  33. noshot/data/ML TS XAI/ML/ML 2/4. Linear Regression.ipynb +0 -86
  34. noshot/data/ML TS XAI/ML/ML 2/5. Logistic Regression.ipynb +0 -115
  35. noshot/data/ML TS XAI/ML/ML 2/6. Naive Bayes (Titanic).ipynb +0 -196
  36. noshot/data/ML TS XAI/ML/ML 2/6. Naive Bayes (Wine).ipynb +0 -98
  37. noshot/data/ML TS XAI/ML/ML 2/7. SVM Linear.ipynb +0 -109
  38. noshot/data/ML TS XAI/ML/ML 2/8. SVM Non-Linear.ipynb +0 -195
  39. noshot/data/ML TS XAI/ML/ML 2/9. FNN With Regularization.ipynb +0 -189
  40. noshot/data/ML TS XAI/ML/ML 2/9. FNN Without Regularization.ipynb +0 -197
  41. noshot/data/ML TS XAI/ML/ML 2/All in One Lab CIA 1 Q.ipynb +0 -1087
  42. noshot/data/ML TS XAI/ML/ML 3 (Latest)/1. PCA EDA.ipynb +0 -274
  43. noshot/data/ML TS XAI/ML/ML 3 (Latest)/10. CNN.ipynb +0 -170
  44. noshot/data/ML TS XAI/ML/ML 3 (Latest)/11. HMM 2.ipynb +0 -1087
  45. noshot/data/ML TS XAI/ML/ML 3 (Latest)/11. HMM 3.ipynb +0 -178
  46. noshot/data/ML TS XAI/ML/ML 3 (Latest)/11. HMM 4.ipynb +0 -185
  47. noshot/data/ML TS XAI/ML/ML 3 (Latest)/11. HMM.ipynb +0 -106
  48. noshot/data/ML TS XAI/ML/ML 3 (Latest)/2. KNN.ipynb +0 -177
  49. noshot/data/ML TS XAI/ML/ML 3 (Latest)/3. LDA.ipynb +0 -195
  50. noshot/data/ML TS XAI/ML/ML 3 (Latest)/4. Linear Regression.ipynb +0 -267
  51. noshot/data/ML TS XAI/ML/ML 3 (Latest)/5. Logistic Regression.ipynb +0 -104
  52. noshot/data/ML TS XAI/ML/ML 3 (Latest)/6. Bayesian Classifier.ipynb +0 -109
  53. noshot/data/ML TS XAI/ML/ML 3 (Latest)/7. SVM.ipynb +0 -220
  54. noshot/data/ML TS XAI/ML/ML 3 (Latest)/8. MLP.ipynb +0 -99
  55. noshot/data/ML TS XAI/ML/ML 3 (Latest)/9. Ridge - Lasso.ipynb +0 -211
  56. noshot/data/ML TS XAI/ML/ML 3 (Latest)/9. Ridge Lasso 2.ipynb +0 -99
  57. noshot/data/ML TS XAI/ML/ML 3 (Latest)/Image Load Example.ipynb +0 -118
  58. noshot/data/ML TS XAI/ML/ML 3 (Latest)/Updated_Untitled.ipynb +0 -603
  59. noshot/data/ML TS XAI/ML/ML Lab AllinOne.ipynb +0 -961
  60. noshot/data/ML TS XAI/ML/ML Lab H Sec/1. Iris Dataset (Softmax vs Sigmoid).ipynb +0 -231
  61. noshot/data/ML TS XAI/ML/ML Lab H Sec/2. Student Dataset (Overfit vs Regularized).ipynb +0 -269
  62. noshot/data/ML TS XAI/ML/ML Lab H Sec/3. Insurance Target Categorical (Overfit vs Regularized).ipynb +0 -274
  63. noshot/data/ML TS XAI/ML/ML Lab H Sec/3. Insurance Target Numerical (Overfit vs Regularized).ipynb +0 -263
  64. noshot/data/ML TS XAI/ML/ML Lab H Sec/4. Smart House System HMM.ipynb +0 -198
  65. noshot/data/ML TS XAI/ML/ML Lab H Sec/5. Fraud Detection System HMM.ipynb +0 -201
  66. noshot/data/ML TS XAI/ML/ML Lab H Sec/insurance.csv +0 -1339
  67. noshot/data/ML TS XAI/ML/ML Lab H Sec/iris1.data +0 -151
  68. noshot/data/ML TS XAI/ML/ML Lab H Sec/student-mat.csv +0 -396
  69. noshot/data/ML TS XAI/ML/ML Lab H Sec/student-por.csv +0 -650
  70. noshot/data/ML TS XAI/ML/Rolls Royce AllinOne.ipynb +0 -691
  71. noshot-11.0.0.dist-info/RECORD +0 -72
  72. {noshot-11.0.0.dist-info → noshot-12.0.0.dist-info}/WHEEL +0 -0
  73. {noshot-11.0.0.dist-info → noshot-12.0.0.dist-info}/licenses/LICENSE.txt +0 -0
  74. {noshot-11.0.0.dist-info → noshot-12.0.0.dist-info}/top_level.txt +0 -0
noshot/data/DLE FSD BDA/DLE/3. CNN (Object Detecrion).ipynb ADDED
@@ -0,0 +1,259 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "jYBOYvgJS3Gn",
+ "outputId": "0876b799-d18a-4968-88e8-7e6b4ce3dcf2"
+ },
+ "outputs": [],
+ "source": [
+ "from google.colab import drive\n",
+ "drive.mount('/content/drive')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "rAQLygx6XQSM",
+ "outputId": "22d76daf-4617-4801-beda-3a41aa19849b"
+ },
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "gpus = tf.config.list_physical_devices('GPU')\n",
+ "if gpus:\n",
+ " try:\n",
+ " tf.config.set_visible_devices(gpus[0], 'GPU')\n",
+ " tf.config.experimental.set_memory_growth(gpus[0], True)\n",
+ " print(\"Connected to GPU:\", gpus[0])\n",
+ " except RuntimeError as e:\n",
+ " print(e)\n",
+ "else:\n",
+ " print(\"No GPU detected\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Fg35-trEUVVO"
+ },
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "from tensorflow.keras.preprocessing import image_dataset_from_directory\n",
+ "from tensorflow.keras.applications import VGG16\n",
+ "from tensorflow.keras.layers import Dense, Flatten, Input\n",
+ "from tensorflow.keras.models import Model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "_ktocPHlXiYf",
+ "outputId": "80949b79-40b2-47b5-e06c-5e3df88be80b"
+ },
+ "outputs": [],
+ "source": [
+ "train_ds=image_dataset_from_directory(\n",
+ " '/content/drive/MyDrive/sem 7/Lab/DL_Lab/Bean_Dataset',\n",
+ " subset='training',\n",
+ " validation_split=0.2,\n",
+ " seed=123,\n",
+ " image_size=(224,224),\n",
+ " batch_size=32\n",
+ ")\n",
+ "\n",
+ "val_ds=image_dataset_from_directory(\n",
+ " '/content/drive/MyDrive/sem 7/Lab/DL_Lab/Bean_Dataset',\n",
+ " subset='validation',\n",
+ " validation_split=0.2,\n",
+ " seed=123,\n",
+ " image_size=(224,224),\n",
+ " batch_size=32\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "GNxlh9pKYj_e"
+ },
+ "outputs": [],
+ "source": [
+ "def preprocess(image, label):\n",
+ " image = tf.cast(image, tf.float32) / 255.0\n",
+ " bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32)\n",
+ "\n",
+ " return image, {\n",
+ " \"class_output\": tf.one_hot(label, depth=3),\n",
+ " \"bbox_output\": bbox\n",
+ " }\n",
+ "\n",
+ "train_ds = train_ds.map(preprocess).prefetch(tf.data.AUTOTUNE)\n",
+ "val_ds = val_ds.map(preprocess).prefetch(tf.data.AUTOTUNE)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "bdOD-gfYZ-v_"
+ },
+ "outputs": [],
+ "source": [
+ "base_model=VGG16(\n",
+ " weights='imagenet',\n",
+ " include_top=False,\n",
+ " input_tensor=Input(shape=(224,224,3))\n",
+ ")\n",
+ "\n",
+ "for layer in base_model.layers:\n",
+ " layer.trainable=False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "yvLkjlSWbT_G"
+ },
+ "outputs": [],
+ "source": [
+ "x=Flatten()(base_model.output)\n",
+ "\n",
+ "class_output=Dense(3,activation='softmax',name='class_output')(x)\n",
+ "\n",
+ "bbox_output=Dense(4,activation='linear',name='bbox_output')(x)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "id": "lt3a7yFkb6oL",
+ "outputId": "671b98a2-0cbd-464b-c64d-f5d26b6afb74"
+ },
+ "outputs": [],
+ "source": [
+ "model=Model(inputs=base_model.input,outputs=[class_output,bbox_output])\n",
+ "model.compile(\n",
+ " optimizer=\"adam\",\n",
+ " loss={\"class_output\": \"categorical_crossentropy\", \"bbox_output\": \"mse\"},\n",
+ " metrics={\"class_output\": \"accuracy\", \"bbox_output\": \"mse\"}\n",
+ ")\n",
+ "model.summary()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "j62lCsGaTLbj",
+ "outputId": "88c8b743-1013-456a-ccbb-75a80f1ec034"
+ },
+ "outputs": [],
+ "source": [
+ "history = model.fit(\n",
+ " train_ds,\n",
+ " validation_data=val_ds,\n",
+ " epochs=5\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 423
+ },
+ "id": "hWUAuWX0TN_5",
+ "outputId": "272c9521-23a6-4e92-e623-cc1355c8df8f"
+ },
+ "outputs": [],
+ "source": [
+ "import cv2\n",
+ "\n",
+ "def show_prediction(img_path):\n",
+ " img = tf.keras.utils.load_img(img_path, target_size=(224, 224))\n",
+ " img_array = tf.keras.utils.img_to_array(img) / 255.0\n",
+ " img_input = np.expand_dims(img_array, axis=0)\n",
+ "\n",
+ " pred_class, pred_bbox = model.predict(img_input)\n",
+ "\n",
+ " # Get predicted class\n",
+ " class_idx = np.argmax(pred_class[0])\n",
+ " class_names = [\"class1\", \"class2\", \"class3\"]\n",
+ " label = class_names[class_idx]\n",
+ " score = np.max(pred_class[0])\n",
+ "\n",
+ " # Scale bbox back to image size\n",
+ " xmin, ymin, xmax, ymax = pred_bbox[0]\n",
+ " xmin, xmax = int(xmin*224), int(xmax*224)\n",
+ " ymin, ymax = int(ymin*224), int(ymax*224)\n",
+ "\n",
+ " img_disp = np.array(img_array*255, dtype=np.uint8)\n",
+ " img_disp = cv2.rectangle(img_disp, (xmin, ymin), (xmax, ymax), (255,0,0), 2)\n",
+ " cv2.putText(img_disp, f\"{label} ({score:.2f})\", (xmin, ymin-10),\n",
+ " cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,0,0), 2)\n",
+ "\n",
+ " plt.imshow(img_disp.astype(\"uint8\"))\n",
+ " plt.axis(\"off\")\n",
+ " plt.show()\n",
+ "\n",
+ "show_prediction(\"/content/drive/MyDrive/sem 7/Lab/DL_Lab/Bean_Dataset/angular_leaf_spot/angular_leaf_spot_06.jpg\")\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
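A note on the object-detection notebook above: its bbox head is trained against the constant target [0.0, 0.0, 1.0, 1.0], so the regression output can only learn to predict the full frame. The sketch below shows how real per-image boxes would flow through the same preprocessing step; preprocess_with_boxes and the box values are hypothetical, since the Bean_Dataset paths used here carry no box annotations.

import tensorflow as tf

# Hypothetical variant of the notebook's preprocess(): the constant
# full-frame target is replaced by a real per-image box, normalized
# to [0, 1] as (xmin, ymin, xmax, ymax).
def preprocess_with_boxes(image, label, bbox):
    image = tf.cast(image, tf.float32) / 255.0
    return image, {
        "class_output": tf.one_hot(label, depth=3),
        "bbox_output": bbox
    }

# Toy check on a single made-up sample.
img = tf.zeros([224, 224, 3], tf.uint8)
box = tf.constant([0.1, 0.2, 0.8, 0.9], tf.float32)
x, y = preprocess_with_boxes(img, tf.constant(1), box)
print(y["class_output"].numpy(), y["bbox_output"].numpy())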
noshot/data/DLE FSD BDA/DLE/4. FCN (Image Segmentaion).ipynb ADDED
@@ -0,0 +1,274 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0a833f48-b878-49c9-855b-897fe220d717",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e367e276-98af-4f80-9477-d0b94bfaaeb2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "af5517b9-0250-4268-92cb-a51f5d18415d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+ "\n",
+ "# Paths to images and masks directories\n",
+ "image_dir = \"C:/Users/Welcome/Downloads/Banana FCN/Images\"\n",
+ "mask_dir = \"C:/Users/Welcome/Downloads/Banana FCN/Mask\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "782cd1fe-749c-4640-92de-e50baa2fe905",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "image_datagen = ImageDataGenerator(rescale=1./255)\n",
+ "mask_datagen = ImageDataGenerator(rescale=1./255)\n",
+ "\n",
+ "image_generator = image_datagen.flow_from_directory(\n",
+ " image_dir,\n",
+ " class_mode=None,\n",
+ " color_mode='rgb',\n",
+ " target_size=(128, 128),\n",
+ " batch_size=32,\n",
+ " seed=42\n",
+ ")\n",
+ "\n",
+ "mask_generator = mask_datagen.flow_from_directory(\n",
+ " mask_dir,\n",
+ " class_mode=None,\n",
+ " color_mode='grayscale',\n",
+ " target_size=(128, 128),\n",
+ " batch_size=32,\n",
+ " seed=42\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bd887bd8-9595-411c-976b-495983003c08",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "train_generator = zip(image_generator, mask_generator)\n",
+ "\n",
+ "\n",
+ "from tensorflow.keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, Input\n",
+ "from tensorflow.keras.models import Model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2df2b32f-10ae-4f29-9996-7f8bbe20d6a8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def build_fcnn():\n",
+ " inputs = Input((128, 128, 3))\n",
+ "\n",
+ " # Encoder\n",
+ " conv1 = Conv2D(128, (3, 3), activation='relu', padding='same')(inputs)\n",
+ " pool1 = MaxPooling2D((2, 2))(conv1)\n",
+ "\n",
+ " conv2 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool1)\n",
+ " pool2 = MaxPooling2D((2, 2))(conv2)\n",
+ "\n",
+ " # Decoder\n",
+ " conv3 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool2)\n",
+ " up1 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv3)\n",
+ "\n",
+ " conv4 = Conv2D(128, (3, 3), activation='relu', padding='same')(up1)\n",
+ " up2 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv4)\n",
+ "\n",
+ " outputs = Conv2D(1, (1, 1), activation='sigmoid', padding='same')(up2)\n",
+ "\n",
+ " model = Model(inputs, outputs)\n",
+ " return model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "68115a30-7a5f-4b1a-96eb-020b3a96a25a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = build_fcnn()\n",
+ "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n",
+ "model.summary()\n",
+ "\n",
+ "# Train the FCNN model\n",
+ "def combined_generator(image_gen, mask_gen):\n",
+ " while True: # Keep yielding data indefinitely\n",
+ " img_batch = next(image_gen)\n",
+ " mask_batch = next(mask_gen)\n",
+ " yield img_batch, mask_batch # Keras expects (input, target)\n",
+ "# Fit the model with the custom generator\n",
+ "train_generator = combined_generator(image_generator, mask_generator)\n",
+ "model.fit(train_generator, steps_per_epoch=len(image_generator), epochs=50)\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "\n",
+ "# Sample image for prediction\n",
+ "sample_image = image_generator[0][0]\n",
+ "predicted_mask = model.predict(np.expand_dims(sample_image, axis=0))[0]\n",
+ "\n",
+ "# Display the original image and predicted mask\n",
+ "plt.figure(figsize=(10, 5))\n",
+ "\n",
+ "plt.subplot(1, 2, 1)\n",
+ "plt.title(\"Original Image\")\n",
+ "plt.imshow(sample_image)\n",
+ "\n",
+ "plt.subplot(1, 2, 2)\n",
+ "plt.title(\"Predicted Mask\")\n",
+ "plt.imshow(predicted_mask.squeeze(), cmap='gray')\n",
+ "\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "31aee94d-c851-4c10-ac38-30ba29aa71a3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+ "\n",
+ "# Paths to images and masks directories\n",
+ "image_dir = \"C:/Users/Welcome/Downloads/Banana FCN/Images/Images\"\n",
+ "mask_dir = \"C:/Users/Welcome/Downloads/Banana FCN/Mask/Mask\"\n",
+ "\n",
+ "# Image and mask data generators\n",
+ "image_datagen = ImageDataGenerator(rescale=1./255)\n",
+ "mask_datagen = ImageDataGenerator(rescale=1./255)\n",
+ "\n",
+ "image_generator = image_datagen.flow_from_directory(\n",
+ " image_dir,\n",
+ " class_mode=None,\n",
+ " color_mode='rgb',\n",
+ " target_size=(128, 128),\n",
+ " batch_size=32,\n",
+ " seed=42\n",
+ ")\n",
+ "\n",
+ "mask_generator = mask_datagen.flow_from_directory(\n",
+ " mask_dir,\n",
+ " class_mode=None,\n",
+ " color_mode='grayscale',\n",
+ " target_size=(128, 128),\n",
+ " batch_size=32,\n",
+ " seed=42\n",
+ ")\n",
+ "\n",
+ "# Combine generators into one which yields image and mask\n",
+ "train_generator = zip(image_generator, mask_generator)\n",
+ "\n",
+ "\n",
+ "from tensorflow.keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, Input\n",
+ "from tensorflow.keras.models import Model\n",
+ "\n",
+ "def build_fcnn():\n",
+ " inputs = Input((128, 128, 3))\n",
+ "\n",
+ " # Encoder\n",
+ " conv1 = Conv2D(128, (3, 3), activation='relu', padding='same')(inputs)\n",
+ " pool1 = MaxPooling2D((2, 2))(conv1)\n",
+ "\n",
+ " conv2 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool1)\n",
+ " pool2 = MaxPooling2D((2, 2))(conv2)\n",
+ "\n",
+ " # Decoder\n",
+ " conv3 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool2)\n",
+ " up1 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv3)\n",
+ "\n",
+ " conv4 = Conv2D(128, (3, 3), activation='relu', padding='same')(up1)\n",
+ " up2 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv4)\n",
+ "\n",
+ " outputs = Conv2D(1, (1, 1), activation='sigmoid', padding='same')(up2)\n",
+ "\n",
+ " model = Model(inputs, outputs)\n",
+ " return model\n",
+ "\n",
+ "model = build_fcnn()\n",
+ "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n",
+ "model.summary()\n",
+ "\n",
+ "# Train the FCNN model\n",
+ "def combined_generator(image_gen, mask_gen):\n",
+ " while True: # Keep yielding data indefinitely\n",
+ " img_batch = next(image_gen)\n",
+ " mask_batch = next(mask_gen)\n",
+ " yield img_batch, mask_batch # Keras expects (input, target)\n",
+ "# Fit the model with the custom generator\n",
+ "train_generator = combined_generator(image_generator, mask_generator)\n",
+ "model.fit(train_generator, steps_per_epoch=len(image_generator), epochs=50)\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "\n",
+ "# Sample image for prediction\n",
+ "sample_image = image_generator[0][0]\n",
+ "predicted_mask = model.predict(np.expand_dims(sample_image, axis=0))[0]\n",
+ "\n",
+ "# Display the original image and predicted mask\n",
+ "plt.figure(figsize=(10, 5))\n",
+ "\n",
+ "plt.subplot(1, 2, 1)\n",
+ "plt.title(\"Original Image\")\n",
+ "plt.imshow(sample_image)\n",
+ "\n",
+ "plt.subplot(1, 2, 2)\n",
+ "plt.title(\"Predicted Mask\")\n",
+ "plt.imshow(predicted_mask.squeeze(), cmap='gray')\n",
+ "\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
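In the segmentation notebook above, the first train_generator = zip(image_generator, mask_generator) is dead code: the final cell overwrites it with combined_generator, an infinite Python generator that model.fit consumes with an explicit steps_per_epoch. Below is a minimal tf.data sketch of the same image/mask pairing, under the assumption that both directories list the same file names; shuffle=False keeps the two sorted listings aligned, standing in for the matched seed=42 used above.

import tensorflow as tf

# Minimal sketch, assuming the notebook's nested directory layout.
# label_mode=None makes each dataset yield raw image batches, and
# shuffle=False keeps both sorted listings in step so Dataset.zip
# pairs each image batch with its mask batch.
image_ds = tf.keras.utils.image_dataset_from_directory(
    "C:/Users/Welcome/Downloads/Banana FCN/Images/Images",
    label_mode=None, image_size=(128, 128), batch_size=32, shuffle=False)
mask_ds = tf.keras.utils.image_dataset_from_directory(
    "C:/Users/Welcome/Downloads/Banana FCN/Mask/Mask",
    label_mode=None, color_mode="grayscale", image_size=(128, 128),
    batch_size=32, shuffle=False)

rescale = lambda t: t / 255.0
train_ds = tf.data.Dataset.zip(
    (image_ds.map(rescale), mask_ds.map(rescale))).prefetch(tf.data.AUTOTUNE)

# Unlike the infinite Python generator, this dataset is finite, so
# model.fit(train_ds, epochs=50) needs no steps_per_epoch.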
noshot/main.py CHANGED
@@ -2,15 +2,15 @@ from noshot.utils.shell_utils import get_folder
  from noshot.utils.shell_utils import get_file
  from noshot.utils.shell_utils import remove_folder
 
- available = {'-1 ' : "ML TS XAI(Folder)",
+ available = {'-1 ' : "DLE FSD BDA(Folder)",
               '0 ' : "Remove Folder"}
 
  def get(name = None, open = False):
      try:
          if name is not None:
              name = str(name)
-             if name in ['-1'] : get_folder("ML TS XAI", loc = True)
-             elif name in ['0'] : remove_folder("ML TS XAI")
+             if name in ['-1'] : get_folder("DLE FSD BDA", loc = True)
+             elif name in ['0'] : remove_folder("DLE FSD BDA")
          else:
              for k, v in available.items():
                  sep = " : " if v else ""
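The main.py change retargets the package's single bundled data folder from "ML TS XAI" to "DLE FSD BDA". A hypothetical usage sketch, assuming get_folder() materializes the named folder from the installed package (its implementation lives in noshot/utils/shell_utils.py, which is unchanged in this diff):

from noshot.main import get

get()      # no argument: walks `available`, presumably printing
           # "-1  : DLE FSD BDA(Folder)" and "0  : Remove Folder"
           # (the loop body is truncated in the hunk above)
get('-1')  # fetches the bundled "DLE FSD BDA" folder via get_folder()
get('0')   # removes it again via remove_folder()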
{noshot-11.0.0.dist-info → noshot-12.0.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: noshot
- Version: 11.0.0
+ Version: 12.0.0
  Summary: Support library for Artificial Intelligence, Machine Learning and Data Science tools
  Author: Tim Stan S
  License: MIT
noshot-12.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+ noshot/__init__.py,sha256=000R40tii8lDFU8C1fBaD3SOnxD0PWRNWZU-km49YrU,21
+ noshot/main.py,sha256=Y92i47Aa0XctPccKQ-hoFlkRbxFmb1NWOf-OtPb_oVU,669
+ noshot/data/DLE FSD BDA/DLE/1. DNN (Image Classification).ipynb,sha256=397KrOUOxsmKB5VZIAhG7QTxFdmLi7IV-CzsYyIIJJQ,8651
+ noshot/data/DLE FSD BDA/DLE/2. DNN vs CNN.ipynb,sha256=yUHoexSUzeD1KbrhOIhPAg_Yd-WWLlMDuqBUmkdq70M,12138
+ noshot/data/DLE FSD BDA/DLE/3. CNN (Object Detecrion).ipynb,sha256=FjeGzLcrwxfGnER5aNc523_otdU_wlsBYiVYvgBrkVk,6953
+ noshot/data/DLE FSD BDA/DLE/4. FCN (Image Segmentaion).ipynb,sha256=6h4eV8A6tuGrB72iqSiI98qv80Eb_H_XoKdyIKM431I,8785
+ noshot/utils/__init__.py,sha256=QVrN1ZpzPXxZqDOqot5-t_ulFjZXVx7Cvr-Is9AK0po,110
+ noshot/utils/shell_utils.py,sha256=-XfgYlNQlULa_rRJ3vsfTns4m_jiueGEj396J_y0Gus,2611
+ noshot-12.0.0.dist-info/licenses/LICENSE.txt,sha256=fgCruaVm5cUjFGOeEoGIimT6nnUunBqcNZHpGzK8TSw,1086
+ noshot-12.0.0.dist-info/METADATA,sha256=N2rDT6xfFJdZiqqoKM7iCRqwbeibXMjuthszOU-prvQ,2574
+ noshot-12.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ noshot-12.0.0.dist-info/top_level.txt,sha256=UL-c0HffdRwohz-y9icY_rnY48pQDdxGcBsgyCKh2Q8,7
+ noshot-12.0.0.dist-info/RECORD,,