noshot 13.0.0__tar.gz → 15.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. {noshot-13.0.0 → noshot-15.0.0}/PKG-INFO +1 -1
  2. {noshot-13.0.0 → noshot-15.0.0}/noshot.egg-info/PKG-INFO +1 -1
  3. {noshot-13.0.0 → noshot-15.0.0}/noshot.egg-info/SOURCES.txt +4 -0
  4. {noshot-13.0.0 → noshot-15.0.0}/setup.py +1 -1
  5. noshot-15.0.0/src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/3. Yolo Object Detection.ipynb +231 -0
  6. noshot-15.0.0/src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/5. Auto Encoder.ipynb +190 -0
  7. noshot-15.0.0/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/3 Bounding Boxes.ipynb +109 -0
  8. noshot-15.0.0/src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex5.ipynb +190 -0
  9. {noshot-13.0.0 → noshot-15.0.0}/LICENSE.txt +0 -0
  10. {noshot-13.0.0 → noshot-15.0.0}/README.md +0 -0
  11. {noshot-13.0.0 → noshot-15.0.0}/noshot.egg-info/dependency_links.txt +0 -0
  12. {noshot-13.0.0 → noshot-15.0.0}/noshot.egg-info/not-zip-safe +0 -0
  13. {noshot-13.0.0 → noshot-15.0.0}/noshot.egg-info/top_level.txt +0 -0
  14. {noshot-13.0.0 → noshot-15.0.0}/setup.cfg +0 -0
  15. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/__init__.py +0 -0
  16. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/1. DNN (Image Classification).ipynb +0 -0
  17. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/2. DNN vs CNN.ipynb +0 -0
  18. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/3. CNN (Object Detecrion).ipynb +0 -0
  19. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/4. FCN (Image Segmentaion).ipynb +0 -0
  20. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.1 DNN (Pytorch).ipynb +0 -0
  21. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.2 DNN (Tensorflow).ipynb +0 -0
  22. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.3 DNN (Image Classification).ipynb +0 -0
  23. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.1 DNN vs CNN.ipynb +0 -0
  24. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.2 DNN vs CNN.ipynb +0 -0
  25. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/4. FCNN (Image Segmentation).ipynb +0 -0
  26. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Lab Excercise (Training DNN).ipynb +0 -0
  27. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Load-Images.ipynb +0 -0
  28. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex1.ipynb +0 -0
  29. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex2.ipynb +0 -0
  30. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex3.ipynb +0 -0
  31. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex4.ipynb +0 -0
  32. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/DNN Ex No 1.ipynb +0 -0
  33. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Ex No 1 Build in dataset.ipynb +0 -0
  34. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Exp1-Short-DL_ANN_ImageClassification.ipynb +0 -0
  35. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/OR GATE .ipynb +0 -0
  36. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp02/Exp2-Short-DL_CNN_ImageClassification.ipynb +0 -0
  37. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp03/DL-Ex3-RNN.ipynb +0 -0
  38. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp04/Ex no 4.ipynb +0 -0
  39. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/main.py +0 -0
  40. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/utils/__init__.py +0 -0
  41. {noshot-13.0.0 → noshot-15.0.0}/src/noshot/utils/shell_utils.py +0 -0
{noshot-13.0.0 → noshot-15.0.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: noshot
- Version: 13.0.0
+ Version: 15.0.0
  Summary: Support library for Artificial Intelligence, Machine Learning and Data Science tools
  Author: Tim Stan S
  License: MIT
{noshot-13.0.0 → noshot-15.0.0}/noshot.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: noshot
- Version: 13.0.0
+ Version: 15.0.0
  Summary: Support library for Artificial Intelligence, Machine Learning and Data Science tools
  Author: Tim Stan S
  License: MIT
{noshot-13.0.0 → noshot-15.0.0}/noshot.egg-info/SOURCES.txt
@@ -11,12 +11,15 @@ src/noshot/main.py
  src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/1. DNN (Image Classification).ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/2. DNN vs CNN.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/3. CNN (Object Detecrion).ipynb
+ src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/3. Yolo Object Detection.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/4. FCN (Image Segmentaion).ipynb
+ src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/5. Auto Encoder.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.1 DNN (Pytorch).ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.2 DNN (Tensorflow).ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.3 DNN (Image Classification).ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.1 DNN vs CNN.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.2 DNN vs CNN.ipynb
+ src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/3 Bounding Boxes.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/4. FCNN (Image Segmentation).ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Lab Excercise (Training DNN).ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Load-Images.ipynb
@@ -24,6 +27,7 @@ src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex1.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex2.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex3.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex4.ipynb
+ src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex5.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/DNN Ex No 1.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Ex No 1 Build in dataset.ipynb
  src/noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Exp1-Short-DL_ANN_ImageClassification.ipynb
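These notebooks ship as package data under src/noshot/data, so an installed copy of noshot can locate them at runtime without knowing the install path. A minimal sketch, assuming the data directory is packaged as shown in SOURCES.txt (the walk below is illustrative, not the noshot API, which this diff does not show):

    from importlib.resources import files

    # Root of the bundled DLE notebooks inside an installed noshot distribution.
    # The directory names come from the SOURCES.txt entries above.
    data_root = files("noshot") / "data" / "DLE FSD BDA" / "DLE"

    # Print every notebook added or already present in 15.0.0.
    for group in data_root.iterdir():
        if group.is_dir():
            for nb in group.iterdir():
                if nb.name.endswith(".ipynb"):
                    print(nb)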
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as f:
5
5
 
6
6
  setup(
7
7
  name="noshot",
8
- version="13.0.0",
8
+ version="15.0.0",
9
9
  author="Tim Stan S",
10
10
  description="Support library for Artificial Intelligence, Machine Learning and Data Science tools",
11
11
  long_description=long_description,
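The only change to setup.py is the version bump, so picking up the four new notebooks is just a package upgrade, e.g. pip install --upgrade noshot==15.0.0.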
noshot-15.0.0/src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/3. Yolo Object Detection.ipynb
@@ -0,0 +1,231 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "source": [
+ "pip install ultralytics"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "okrAbLmdTUrP",
+ "outputId": "39ded965-c6cf-4596-99c5-262653e91d78"
+ },
+ "execution_count": 20,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Requirement already satisfied: ultralytics in /usr/local/lib/python3.12/dist-packages (8.3.197)\n",
+ "Requirement already satisfied: numpy>=1.23.0 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (2.0.2)\n",
+ "Requirement already satisfied: matplotlib>=3.3.0 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (3.10.0)\n",
+ "Requirement already satisfied: opencv-python>=4.6.0 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (4.12.0.88)\n",
+ "Requirement already satisfied: pillow>=7.1.2 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (11.3.0)\n",
+ "Requirement already satisfied: pyyaml>=5.3.1 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (6.0.2)\n",
+ "Requirement already satisfied: requests>=2.23.0 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (2.32.4)\n",
+ "Requirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (1.16.1)\n",
+ "Requirement already satisfied: torch>=1.8.0 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (2.8.0+cu126)\n",
+ "Requirement already satisfied: torchvision>=0.9.0 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (0.23.0+cu126)\n",
+ "Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ultralytics) (5.9.5)\n",
+ "Requirement already satisfied: polars in /usr/local/lib/python3.12/dist-packages (from ultralytics) (1.25.2)\n",
+ "Requirement already satisfied: ultralytics-thop>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ultralytics) (2.0.17)\n",
+ "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.3.0->ultralytics) (1.3.3)\n",
+ "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.3.0->ultralytics) (0.12.1)\n",
+ "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.3.0->ultralytics) (4.59.2)\n",
+ "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.3.0->ultralytics) (1.4.9)\n",
+ "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.3.0->ultralytics) (25.0)\n",
+ "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.3.0->ultralytics) (3.2.3)\n",
+ "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.3.0->ultralytics) (2.9.0.post0)\n",
+ "Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests>=2.23.0->ultralytics) (3.4.3)\n",
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests>=2.23.0->ultralytics) (3.10)\n",
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests>=2.23.0->ultralytics) (2.5.0)\n",
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests>=2.23.0->ultralytics) (2025.8.3)\n",
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (3.19.1)\n",
+ "Requirement already satisfied: typing-extensions>=4.10.0 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (4.15.0)\n",
+ "Requirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (75.2.0)\n",
+ "Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (1.13.3)\n",
+ "Requirement already satisfied: networkx in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (3.5)\n",
+ "Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (3.1.6)\n",
+ "Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (2025.3.0)\n",
+ "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (12.6.77)\n",
+ "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (12.6.77)\n",
+ "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (12.6.80)\n",
+ "Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (9.10.2.21)\n",
+ "Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (12.6.4.1)\n",
+ "Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (11.3.0.4)\n",
+ "Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (10.3.7.77)\n",
+ "Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (11.7.1.2)\n",
+ "Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (12.5.4.2)\n",
+ "Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (0.7.1)\n",
+ "Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (2.27.3)\n",
+ "Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (12.6.77)\n",
+ "Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (12.6.85)\n",
+ "Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (1.11.1.6)\n",
+ "Requirement already satisfied: triton==3.4.0 in /usr/local/lib/python3.12/dist-packages (from torch>=1.8.0->ultralytics) (3.4.0)\n",
+ "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.7->matplotlib>=3.3.0->ultralytics) (1.17.0)\n",
+ "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch>=1.8.0->ultralytics) (1.3.0)\n",
+ "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch>=1.8.0->ultralytics) (3.0.2)\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!unzip \"Object detection dataset (1).zip\""
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "RpCw7yD1k6zC",
+ "outputId": "7caf76a3-92cf-48a2-c5ca-da11bcafcc5d"
+ },
+ "execution_count": 21,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "unzip: cannot find or open Object detection dataset (1).zip, Object detection dataset (1).zip.zip or Object detection dataset (1).zip.ZIP.\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from ultralytics import YOLO\n",
+ "import os\n",
+ "import glob"
+ ],
+ "metadata": {
+ "id": "PVdbYKuaTM7a"
+ },
+ "execution_count": 22,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "model=YOLO('yolov8n.pt')"
+ ],
+ "metadata": {
+ "id": "3XIDWbufTi4U"
+ },
+ "execution_count": 23,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "image_folder = 'Object detection dataset/train/train'\n",
+ "output_folder = 'output1'\n",
+ "os.makedirs(output_folder, exist_ok=True)"
+ ],
+ "metadata": {
+ "id": "rShfURYelBg1"
+ },
+ "execution_count": 24,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "image_paths = glob.glob(os.path.join(image_folder, '*.png')) + glob.glob(os.path.join(image_folder, '*.jpg'))\n"
+ ],
+ "metadata": {
+ "id": "Pd-i76a1lI1R"
+ },
+ "execution_count": 25,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "for img_path in image_paths:\n",
+ " r = model(img_path)\n",
+ "\n",
+ " # Save output image\n",
+ " out_path = os.path.join(output_folder, os.path.basename(img_path))\n",
+ " r[0].save(out_path)"
+ ],
+ "metadata": {
+ "id": "LkmIUkotlOui"
+ },
+ "execution_count": 26,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import os\n",
+ "from IPython.display import Image, display"
+ ],
+ "metadata": {
+ "id": "B4ibhsIilZGX"
+ },
+ "execution_count": 27,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "output_files = os.listdir(output_folder)\n",
+ "\n",
+ "image_files = [f for f in output_files if f.endswith('.jpg') or f.endswith('.png')]\n",
+ "\n",
+ "if image_files:\n",
+ " first_image_path = os.path.join(output_folder, image_files[0])\n",
+ " print(f\"Displaying: {first_image_path}\")\n",
+ " display(Image(filename=first_image_path))\n",
+ "else:\n",
+ " print(\"No image files found in the output folder.\")"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "jo1XjaASlaNN",
+ "outputId": "c2a50c9e-8ce7-462a-e4d5-c29405dc691d"
+ },
+ "execution_count": 28,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "No image files found in the output folder.\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [],
+ "metadata": {
+ "id": "BYLCoI5Olcoo"
+ },
+ "execution_count": 28,
+ "outputs": []
+ }
+ ]
+ }
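For orientation, the flow of this new notebook condenses to a short script: load a pretrained YOLOv8 model via ultralytics, run it over a folder of images, and save the annotated results. A minimal sketch under the notebook's own assumptions (the "Object detection dataset" folder must exist locally; it is not bundled with the package, which is why the notebook's recorded run reports that no images were found):

    import glob
    import os

    from ultralytics import YOLO  # requires `pip install ultralytics`

    # Paths assumed by the notebook; the dataset zip is not part of this diff.
    image_folder = "Object detection dataset/train/train"
    output_folder = "output1"
    os.makedirs(output_folder, exist_ok=True)

    # Pretrained nano weights are downloaded on first use.
    model = YOLO("yolov8n.pt")

    # Detect objects in every PNG/JPG and save the rendered boxes under output1/.
    paths = glob.glob(os.path.join(image_folder, "*.png")) + glob.glob(os.path.join(image_folder, "*.jpg"))
    for img_path in paths:
        result = model(img_path)[0]
        result.save(os.path.join(output_folder, os.path.basename(img_path)))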
noshot-15.0.0/src/noshot/data/DLE FSD BDA/DLE/DLE 1 (Json)/5. Auto Encoder.ipynb
@@ -0,0 +1,190 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "IQb02ekPErC1"
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import tensorflow as tf\n",
+ "from tensorflow.keras.datasets import mnist\n",
+ "from tensorflow.keras.models import Model\n",
+ "from tensorflow.keras.layers import Input,Dense"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "JKrxqZpvFW4X"
+ },
+ "outputs": [],
+ "source": [
+ "(X_train, _),(X_test, _)=mnist.load_data()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "FmNHIehxFgna",
+ "outputId": "d5a6d856-094b-436d-c63b-a4b318e2dddc"
+ },
+ "outputs": [],
+ "source": [
+ "print(X_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "ue7nZ4rVHRm8",
+ "outputId": "132813b9-4c0f-42ee-ceb2-e1108b87ba1a"
+ },
+ "outputs": [],
+ "source": [
+ "X_train=X_train.astype('float32')/255.0\n",
+ "X_test=X_test.astype('float32')/255.0\n",
+ "print(X_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "fje2VkFHHrPw"
+ },
+ "outputs": [],
+ "source": [
+ "X_train=X_train.reshape((len(X_train),np.prod((X_train.shape[1:]))))\n",
+ "X_test=X_test.reshape((len(X_test),np.prod((X_test.shape[1:]))))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "HfKxKikFIaSR"
+ },
+ "outputs": [],
+ "source": [
+ "input_dims=X_train.shape[1]\n",
+ "encoded_dims=32\n",
+ "input_layer=Input(shape=(input_dims,))\n",
+ "\n",
+ "encoded=Dense(encoded_dims,activation='relu')(input_layer)\n",
+ "\n",
+ "decoded=Dense(input_dims,activation='sigmoid')(encoded)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "7rTRxX8kKsif"
+ },
+ "outputs": [],
+ "source": [
+ "encoder=Model(input_layer,encoded)\n",
+ "autoencoder=Model(input_layer,decoded)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "V4bP4-RDK7LH",
+ "outputId": "769c12fc-49e4-4376-d501-4d4159138d17"
+ },
+ "outputs": [],
+ "source": [
+ "autoencoder.compile(optimizer='adam',loss='binary_crossentropy')\n",
+ "\n",
+ "autoencoder.fit(X_train,X_train,epochs=5,batch_size=32,validation_data=(X_test,X_test))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "CrqJare2KONO",
+ "outputId": "67104eb8-4143-4700-85d7-30c05490a74f"
+ },
+ "outputs": [],
+ "source": [
+ "encoded_img=encoder.predict(X_test)\n",
+ "\n",
+ "decoded_imgs=autoencoder.predict(X_test)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "id": "HScdtV42LYBH",
+ "outputId": "81059a8f-f806-4f10-cfd3-666e87282085"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "for i in range(10):\n",
+ " plt.imshow(X_test[i].reshape(28,28))\n",
+ " plt.title('Original Image')\n",
+ " plt.show()\n",
+ " plt.imshow(decoded_imgs[i].reshape(28,28))\n",
+ " plt.title('Reconstructed Image')\n",
+ " plt.show()\n",
+ " plt.imshow(encoded_img[i].reshape(4,8))\n",
+ " plt.title('Encoded Image')\n",
+ " plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
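The autoencoder in this notebook is a single dense bottleneck, 784 → 32 → 784; encoded_dims=32 is why the final cell can display each encoded vector as a 4×8 image. A compact sketch of the same model definition, assembled from the cells above for orientation (a sketch, not an alternative implementation):

    import tensorflow as tf
    from tensorflow.keras.datasets import mnist
    from tensorflow.keras.layers import Dense, Input
    from tensorflow.keras.models import Model

    # Flatten MNIST to 784-dim vectors scaled into [0, 1].
    (X_train, _), (X_test, _) = mnist.load_data()
    X_train = X_train.astype("float32").reshape(len(X_train), -1) / 255.0
    X_test = X_test.astype("float32").reshape(len(X_test), -1) / 255.0

    inputs = Input(shape=(784,))
    encoded = Dense(32, activation="relu")(inputs)       # bottleneck: 32 = 4 x 8
    decoded = Dense(784, activation="sigmoid")(encoded)  # reconstruct the 28 x 28 pixels

    autoencoder = Model(inputs, decoded)
    encoder = Model(inputs, encoded)
    autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
    autoencoder.fit(X_train, X_train, epochs=5, batch_size=32, validation_data=(X_test, X_test))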
noshot-15.0.0/src/noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/3 Bounding Boxes.ipynb
@@ -0,0 +1,109 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1c5ce2ae-0abb-45b4-a94a-7548d9af6b6a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "import torchvision\n",
+ "from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n",
+ "from torchvision.datasets import VOCDetection\n",
+ "from torch.utils.data import DataLoader\n",
+ "import torchvision.transforms as T\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.patches as patches\n",
+ "\n",
+ "classes = [\"__background__\", \"apple\", \"banana\", \"orange\"]\n",
+ "\n",
+ "def get_model(num_classes):\n",
+ " model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights=\"DEFAULT\")\n",
+ " in_features = model.roi_heads.box_predictor.cls_score.in_features\n",
+ " model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n",
+ " return model\n",
+ "\n",
+ "def collate_fn(batch):\n",
+ " return tuple(zip(*batch))\n",
+ "\n",
+ "transform = T.Compose([T.ToTensor()])\n",
+ "\n",
+ "def target_transform(target):\n",
+ " objs = target[\"annotation\"][\"object\"]\n",
+ " if not isinstance(objs, list):\n",
+ " objs = [objs]\n",
+ " boxes = []\n",
+ " labels = []\n",
+ " for obj in objs:\n",
+ " name = obj[\"name\"]\n",
+ " bbox = obj[\"bndbox\"]\n",
+ " xmin, ymin, xmax, ymax = int(bbox[\"xmin\"]), int(bbox[\"ymin\"]), int(bbox[\"xmax\"]), int(bbox[\"ymax\"])\n",
+ " boxes.append([xmin, ymin, xmax, ymax])\n",
+ " labels.append(classes.index(name) if name in classes else 0)\n",
+ " return {\"boxes\": torch.tensor(boxes, dtype=torch.float32), \"labels\": torch.tensor(labels, dtype=torch.int64)}\n",
+ "\n",
+ "train_dataset = VOCDetection(\"./\", year=\"2012\", image_set=\"train\", download=True, transform=transform, target_transform=target_transform)\n",
+ "train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True, collate_fn=collate_fn)\n",
+ "\n",
+ "model = get_model(len(classes))\n",
+ "device = torch.device(\"cpu\")\n",
+ "model.to(device)\n",
+ "\n",
+ "params = [p for p in model.parameters() if p.requires_grad]\n",
+ "optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\n",
+ "\n",
+ "num_epochs = 1\n",
+ "for epoch in range(num_epochs):\n",
+ " model.train()\n",
+ " for images, targets in train_loader:\n",
+ " images = [img.to(device) for img in images]\n",
+ " targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n",
+ " loss_dict = model(images, targets)\n",
+ " losses = sum(loss for loss in loss_dict.values())\n",
+ " optimizer.zero_grad()\n",
+ " losses.backward()\n",
+ " optimizer.step()\n",
+ " print(f\"Epoch {epoch+1}, Loss: {losses.item():.4f}\")\n",
+ "\n",
+ "model.eval()\n",
+ "images, _ = next(iter(train_loader))\n",
+ "img = images[0].to(device)\n",
+ "with torch.no_grad():\n",
+ " prediction = model([img])\n",
+ "\n",
+ "img_np = img.permute(1, 2, 0).numpy()\n",
+ "fig, ax = plt.subplots(1)\n",
+ "ax.imshow(img_np)\n",
+ "for box, label, score in zip(prediction[0][\"boxes\"], prediction[0][\"labels\"], prediction[0][\"scores\"]):\n",
+ " if score > 0.5:\n",
+ " xmin, ymin, xmax, ymax = box\n",
+ " rect = patches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, linewidth=2, edgecolor='r', facecolor='none')\n",
+ " ax.add_patch(rect)\n",
+ " ax.text(xmin, ymin, classes[label], bbox=dict(facecolor='yellow', alpha=0.5))\n",
+ "plt.show()\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
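This Faster R-CNN notebook leans on two conventions of torchvision's detection API: each image needs a target dict with "boxes" (N×4, xyxy) and "labels" (N,) tensors, and the DataLoader needs a collate_fn that keeps the variable-sized targets in tuples rather than stacking them into one tensor. A stripped-down illustration with one fabricated box (the values are purely hypothetical):

    import torch
    from torch.utils.data import DataLoader

    # One training sample: an image tensor plus its detection target.
    image = torch.rand(3, 224, 224)
    target = {
        "boxes": torch.tensor([[30.0, 40.0, 120.0, 160.0]]),  # xmin, ymin, xmax, ymax
        "labels": torch.tensor([1]),                           # index into the classes list
    }

    def collate_fn(batch):
        # Detection targets differ in size per image, so keep them as tuples.
        return tuple(zip(*batch))

    loader = DataLoader([(image, target)] * 2, batch_size=2, collate_fn=collate_fn)
    images, targets = next(iter(loader))
    print(len(images), targets[0]["boxes"].shape)  # 2 torch.Size([1, 4])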
noshot-15.0.0/src/noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex5.ipynb
@@ -0,0 +1,190 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "IQb02ekPErC1"
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import tensorflow as tf\n",
+ "from tensorflow.keras.datasets import mnist\n",
+ "from tensorflow.keras.models import Model\n",
+ "from tensorflow.keras.layers import Input,Dense"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "JKrxqZpvFW4X"
+ },
+ "outputs": [],
+ "source": [
+ "(X_train, _),(X_test, _)=mnist.load_data()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "FmNHIehxFgna",
+ "outputId": "d5a6d856-094b-436d-c63b-a4b318e2dddc"
+ },
+ "outputs": [],
+ "source": [
+ "print(X_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "ue7nZ4rVHRm8",
+ "outputId": "132813b9-4c0f-42ee-ceb2-e1108b87ba1a"
+ },
+ "outputs": [],
+ "source": [
+ "X_train=X_train.astype('float32')/255.0\n",
+ "X_test=X_test.astype('float32')/255.0\n",
+ "print(X_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "fje2VkFHHrPw"
+ },
+ "outputs": [],
+ "source": [
+ "X_train=X_train.reshape((len(X_train),np.prod((X_train.shape[1:]))))\n",
+ "X_test=X_test.reshape((len(X_test),np.prod((X_test.shape[1:]))))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "HfKxKikFIaSR"
+ },
+ "outputs": [],
+ "source": [
+ "input_dims=X_train.shape[1]\n",
+ "encoded_dims=32\n",
+ "input_layer=Input(shape=(input_dims,))\n",
+ "\n",
+ "encoded=Dense(encoded_dims,activation='relu')(input_layer)\n",
+ "\n",
+ "decoded=Dense(input_dims,activation='sigmoid')(encoded)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "7rTRxX8kKsif"
+ },
+ "outputs": [],
+ "source": [
+ "encoder=Model(input_layer,encoded)\n",
+ "autoencoder=Model(input_layer,decoded)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "V4bP4-RDK7LH",
+ "outputId": "769c12fc-49e4-4376-d501-4d4159138d17"
+ },
+ "outputs": [],
+ "source": [
+ "autoencoder.compile(optimizer='adam',loss='binary_crossentropy')\n",
+ "\n",
+ "autoencoder.fit(X_train,X_train,epochs=5,batch_size=32,validation_data=(X_test,X_test))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "CrqJare2KONO",
+ "outputId": "67104eb8-4143-4700-85d7-30c05490a74f"
+ },
+ "outputs": [],
+ "source": [
+ "encoded_img=encoder.predict(X_test)\n",
+ "\n",
+ "decoded_imgs=autoencoder.predict(X_test)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "id": "HScdtV42LYBH",
+ "outputId": "81059a8f-f806-4f10-cfd3-666e87282085"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "for i in range(10):\n",
+ " plt.imshow(X_test[i].reshape(28,28))\n",
+ " plt.title('Original Image')\n",
+ " plt.show()\n",
+ " plt.imshow(decoded_imgs[i].reshape(28,28))\n",
+ " plt.title('Reconstructed Image')\n",
+ " plt.show()\n",
+ " plt.imshow(encoded_img[i].reshape(4,8))\n",
+ " plt.title('Encoded Image')\n",
+ " plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }