noshot-12.0.0-py3-none-any.whl → noshot-13.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.1 DNN (Pytorch).ipynb +164 -0
  2. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.2 DNN (Tensorflow).ipynb +94 -0
  3. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.3 DNN (Image Classification).ipynb +134 -0
  4. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.1 DNN vs CNN.ipynb +127 -0
  5. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.2 DNN vs CNN.ipynb +123 -0
  6. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/4. FCNN (Image Segmentation).ipynb +108 -0
  7. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Lab Excercise (Training DNN).ipynb +646 -0
  8. noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Load-Images.ipynb +553 -0
  9. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex1.ipynb +216 -0
  10. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex2.ipynb +195 -0
  11. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex3.ipynb +427 -0
  12. noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex4.ipynb +186 -0
  13. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/DNN Ex No 1.ipynb +398 -0
  14. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Ex No 1 Build in dataset.ipynb +171 -0
  15. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Exp1-Short-DL_ANN_ImageClassification.ipynb +401 -0
  16. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/OR GATE .ipynb +8511 -0
  17. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp02/Exp2-Short-DL_CNN_ImageClassification.ipynb +737 -0
  18. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp03/DL-Ex3-RNN.ipynb +591 -0
  19. noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp04/Ex no 4.ipynb +551 -0
  20. {noshot-12.0.0.dist-info → noshot-13.0.0.dist-info}/METADATA +1 -1
  21. noshot-13.0.0.dist-info/RECORD +32 -0
  22. noshot-12.0.0.dist-info/RECORD +0 -13
  23. /noshot/data/DLE FSD BDA/DLE/{1. DNN (Image Classification).ipynb → DLE 1 (Json)/1. DNN (Image Classification).ipynb} +0 -0
  24. /noshot/data/DLE FSD BDA/DLE/{2. DNN vs CNN.ipynb → DLE 1 (Json)/2. DNN vs CNN.ipynb} +0 -0
  25. /noshot/data/DLE FSD BDA/DLE/{3. CNN (Object Detecrion).ipynb → DLE 1 (Json)/3. CNN (Object Detecrion).ipynb} +0 -0
  26. /noshot/data/DLE FSD BDA/DLE/{4. FCN (Image Segmentaion).ipynb → DLE 1 (Json)/4. FCN (Image Segmentaion).ipynb} +0 -0
  27. {noshot-12.0.0.dist-info → noshot-13.0.0.dist-info}/WHEEL +0 -0
  28. {noshot-12.0.0.dist-info → noshot-13.0.0.dist-info}/licenses/LICENSE.txt +0 -0
  29. {noshot-12.0.0.dist-info → noshot-13.0.0.dist-info}/top_level.txt +0 -0
noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Load-Images.ipynb
@@ -0,0 +1,553 @@
+ {
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "1d42b33e-2478-496c-844a-0828baead407",
+ "metadata": {},
+ "source": [
+ "dataset/\n",
+ "├── daisy/\n",
+ "│ ├── img1.jpg\n",
+ "│ ├── img2.jpg\n",
+ "├── rose/\n",
+ "│ ├── img3.jpg\n",
+ "├── tulip/\n",
+ "│ ├── img4.jpg"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "77e0b81d-421d-40dd-828c-9981081938d1",
+ "metadata": {},
+ "source": [
+ "1. PyTorch Dataset Loader"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dc1b94b0-2935-4e9f-9574-c90a1837c5e2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "from torchvision import datasets, transforms\n",
+ "from torch.utils.data import DataLoader\n",
+ "from torch.utils.data import random_split\n",
+ "\n",
+ "transform = transforms.Compose([\n",
+ " transforms.Resize((224, 224)),\n",
+ " transforms.ToTensor(),\n",
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
+ " std=[0.229, 0.224, 0.225])\n",
+ "])\n",
+ "\n",
+ "dataset_path = r\"Dataset\"\n",
+ "dataset = datasets.ImageFolder(root=dataset_path, transform=transform)\n",
+ "\n",
+ "train_size = int(0.8 * len(dataset))\n",
+ "test_size = len(dataset) - train_size\n",
+ "train_dataset, test_dataset = random_split(dataset, [train_size, test_size])\n",
+ "\n",
+ "train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)\n",
+ "test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)\n",
+ "\n",
+ "print(dataset.classes)"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "4cd738fb-59f3-4d8b-b026-22c06a925092",
+ "metadata": {},
+ "source": [
+ "2. TensorFlow/Keras Dataset Loader"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3ec13664-fd81-48f9-864b-3f784d52d79d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "\n",
+ "dataset_path = r\"Dataset\"\n",
+ "\n",
+ "train_ds = tf.keras.utils.image_dataset_from_directory(\n",
+ " dataset_path,\n",
+ " validation_split=0.2,\n",
+ " subset=\"training\",\n",
+ " seed=42,\n",
+ " image_size=(224, 224),\n",
+ " batch_size=32\n",
+ ")\n",
+ "\n",
+ "val_ds = tf.keras.utils.image_dataset_from_directory(\n",
+ " dataset_path,\n",
+ " validation_split=0.2,\n",
+ " subset=\"validation\",\n",
+ " seed=42,\n",
+ " image_size=(224, 224),\n",
+ " batch_size=32\n",
+ ")\n",
+ "\n",
+ "print(train_ds.class_names)"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "641bd949-4908-4685-920d-38b505a74ad4",
+ "metadata": {},
+ "source": [
+ "dataset/\n",
+ "├── train/\n",
+ "│ ├── cats/\n",
+ "│ └── dogs/\n",
+ "├── val/\n",
+ "│ ├── cats/\n",
+ "│ └── dogs/\n",
+ "└── test/\n",
+ " ├── cats/\n",
+ " └── dogs/"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "654addfa-a0a3-412d-a9a0-7bc395e8489d",
+ "metadata": {},
+ "source": [
+ "1. Keras: image_dataset_from_directory()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d849171a-a9ea-4b1d-ae3b-d3114fcbd55b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "\n",
+ "train_ds = tf.keras.utils.image_dataset_from_directory(\n",
+ " \"dataset/train\",\n",
+ " image_size=(224, 224),\n",
+ " batch_size=32\n",
+ ")\n",
+ "\n",
+ "val_ds = tf.keras.utils.image_dataset_from_directory(\n",
+ " \"dataset/val\",\n",
+ " image_size=(224, 224),\n",
+ " batch_size=32\n",
+ ")\n",
+ "\n",
+ "test_ds = tf.keras.utils.image_dataset_from_directory(\n",
+ " \"dataset/test\",\n",
+ " image_size=(224, 224),\n",
+ " batch_size=32\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "d7462834-d3fb-4e25-8783-61fc72cb2da1",
+ "metadata": {},
+ "source": [
+ "2. Keras: load_img() + img_to_array()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b94e104f-6a13-4d30-a342-82870afacbcc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from tensorflow.keras.preprocessing.image import load_img, img_to_array\n",
+ "import os\n",
+ "\n",
+ "train_dir = \"dataset/train/cats\"\n",
+ "for file in os.listdir(train_dir):\n",
+ " img = load_img(os.path.join(train_dir, file), target_size=(224, 224))\n",
+ " img_array = img_to_array(img)\n",
+ " print(img_array.shape)"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "026d9a82-4182-4054-a0e2-b15de5ab2f2f",
+ "metadata": {},
+ "source": [
+ "3. OpenCV (cv2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5503b804-f8f3-4cd7-a456-014be0cca786",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import cv2, os\n",
+ "\n",
+ "train_dir = \"dataset/train/dogs\"\n",
+ "for file in os.listdir(train_dir):\n",
+ " img = cv2.imread(os.path.join(train_dir, file))\n",
+ " img = cv2.resize(img, (224, 224))\n",
+ " print(img.shape)"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "ef43b949-ca39-4b67-ba01-3191e683559a",
+ "metadata": {},
+ "source": [
+ "4. PIL"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "26f177c9-5f59-489b-b28d-7e8a0551c78f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from PIL import Image\n",
+ "import os\n",
+ "\n",
+ "val_dir = \"dataset/val/cats\"\n",
+ "for file in os.listdir(val_dir):\n",
+ " img = Image.open(os.path.join(val_dir, file)).resize((224, 224))\n",
+ " img.show()"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "55011c13-c1ec-4329-8764-1ad40e3448b8",
+ "metadata": {},
+ "source": [
+ "5. Keras ImageDataGenerator"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "daf3afd9-7bbe-41e7-b783-5fb0117f2c9f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+ "\n",
+ "datagen = ImageDataGenerator(rescale=1./255)\n",
+ "\n",
+ "train_gen = datagen.flow_from_directory(\n",
+ " \"dataset/train\",\n",
+ " target_size=(224, 224),\n",
+ " batch_size=32,\n",
+ " class_mode=\"categorical\"\n",
+ ")\n",
+ "\n",
+ "val_gen = datagen.flow_from_directory(\n",
+ " \"dataset/val\",\n",
+ " target_size=(224, 224),\n",
+ " batch_size=32,\n",
+ " class_mode=\"categorical\"\n",
+ ")\n",
+ "\n",
+ "test_gen = datagen.flow_from_directory(\n",
+ " \"dataset/test\",\n",
+ " target_size=(224, 224),\n",
+ " batch_size=32,\n",
+ " class_mode=\"categorical\"\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "f1e24547-5d8b-4a23-aafb-40683eea3191",
+ "metadata": {},
+ "source": [
+ "6. PyTorch (torchvision + DataLoader)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e58cd557-829c-4877-a79c-b7075c69ef10",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from torchvision import datasets, transforms\n",
+ "from torch.utils.data import DataLoader\n",
+ "\n",
+ "transform = transforms.Compose([\n",
+ " transforms.Resize((224, 224)),\n",
+ " transforms.ToTensor()\n",
+ "])\n",
+ "\n",
+ "train_data = datasets.ImageFolder(\"dataset/train\", transform=transform)\n",
+ "val_data = datasets.ImageFolder(\"dataset/val\", transform=transform)\n",
+ "test_data = datasets.ImageFolder(\"dataset/test\", transform=transform)\n",
+ "\n",
+ "train_loader = DataLoader(train_data, batch_size=32, shuffle=True)\n",
+ "val_loader = DataLoader(val_data, batch_size=32, shuffle=False)\n",
+ "test_loader = DataLoader(test_data, batch_size=32, shuffle=False)"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "35ded701-4a97-4513-ab0a-81f3f0678de8",
+ "metadata": {},
+ "source": [
+ "7. Custom Dataset (PyTorch)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d0799119-49f5-408f-938f-cce0c47d6fe0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from torch.utils.data import Dataset, DataLoader\n",
+ "from torchvision import transforms\n",
+ "from PIL import Image\n",
+ "import os\n",
+ "\n",
+ "class CustomDataset(Dataset):\n",
+ "    def __init__(self, root_dir, transform=None):\n",
+ "        self.files, self.labels = [], []\n",
+ "        self.transform = transform\n",
+ "        classes = os.listdir(root_dir)\n",
+ "\n",
+ "        for label, cls in enumerate(classes):\n",
+ "            cls_folder = os.path.join(root_dir, cls)\n",
+ "            for f in os.listdir(cls_folder):\n",
+ "                self.files.append(os.path.join(cls_folder, f))\n",
+ "                self.labels.append(label)\n",
+ "\n",
+ "    def __len__(self): return len(self.files)\n",
+ "\n",
+ "    def __getitem__(self, idx):\n",
+ "        img = Image.open(self.files[idx]).convert(\"RGB\")\n",
+ "        if self.transform: img = self.transform(img)\n",
+ "        return img, self.labels[idx]\n",
+ "\n",
+ "transform = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor()])\n",
+ "\n",
+ "train_set = CustomDataset(\"dataset/train\", transform=transform)\n",
+ "train_loader = DataLoader(train_set, batch_size=16, shuffle=True)"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "75dced80-cabf-4366-afbb-01ed9a100514",
+ "metadata": {},
+ "source": [
+ "dataset/\n",
+ "├── cats/\n",
+ "│ ├── cat1.jpg\n",
+ "│ ├── cat2.jpg\n",
+ "├── dogs/\n",
+ "│ ├── dog1.jpg\n",
+ "│ ├── dog2.jpg"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "eab1bc9f-362d-46ca-a1fc-a7c10c397e12",
+ "metadata": {},
+ "source": [
+ "1. Keras: image_dataset_from_directory()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f0f0915b-5a20-4dcd-87f5-5292a1f58094",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "\n",
+ "dataset = tf.keras.utils.image_dataset_from_directory(\n",
+ " \"dataset\",\n",
+ " image_size=(224, 224),\n",
+ " batch_size=32,\n",
+ " validation_split=0.2,\n",
+ " subset=\"training\",\n",
+ " seed=123\n",
+ ")\n",
+ "\n",
+ "val_dataset = tf.keras.utils.image_dataset_from_directory(\n",
+ " \"dataset\",\n",
+ " image_size=(224, 224),\n",
+ " batch_size=32,\n",
+ " validation_split=0.2,\n",
+ " subset=\"validation\",\n",
+ " seed=123\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "4ba50372-bb83-4e3a-81a2-4689f93c7332",
+ "metadata": {},
+ "source": [
+ "2. Keras: load_img() + img_to_array()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "94259a6e-4974-4fe6-bb94-d192b7f5629b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from tensorflow.keras.preprocessing.image import load_img, img_to_array\n",
+ "import os\n",
+ "\n",
+ "cat_dir = \"dataset/cats\"\n",
+ "for f in os.listdir(cat_dir):\n",
+ " img = load_img(os.path.join(cat_dir, f), target_size=(224,224))\n",
+ " arr = img_to_array(img)\n",
+ " print(arr.shape)"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "6fe2ee61-8371-4b92-9b16-5c4bd05cd787",
+ "metadata": {},
+ "source": [
+ "3. OpenCV"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "db34c646-d85b-4337-8fb7-b828d48cc36e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import cv2, os\n",
+ "\n",
+ "dog_dir = \"dataset/dogs\"\n",
+ "for f in os.listdir(dog_dir):\n",
+ " img = cv2.imread(os.path.join(dog_dir, f))\n",
+ " img = cv2.resize(img, (224,224))\n",
+ " print(img.shape)"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "1f30d625-720a-47fb-9d30-cca3db879262",
+ "metadata": {},
+ "source": [
+ "4. PIL"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2a015757-5327-4ace-abb2-0cb42890ee8c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from PIL import Image\n",
+ "import os\n",
+ "\n",
+ "cat_dir = \"dataset/cats\"\n",
+ "for f in os.listdir(cat_dir):\n",
+ " img = Image.open(os.path.join(cat_dir, f)).resize((224,224))\n",
+ " img.show()"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "5505e51f-7d32-4050-a131-7dd3248c004c",
+ "metadata": {},
+ "source": [
+ "5. ImageDataGenerator"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2810b7f4-4602-43ed-b2ff-0af52198e24f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+ "\n",
+ "datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)\n",
+ "\n",
+ "train_gen = datagen.flow_from_directory(\n",
+ " \"dataset\",\n",
+ " target_size=(224,224),\n",
+ " batch_size=32,\n",
+ " class_mode=\"categorical\",\n",
+ " subset=\"training\"\n",
+ ")\n",
+ "\n",
+ "val_gen = datagen.flow_from_directory(\n",
+ " \"dataset\",\n",
+ " target_size=(224,224),\n",
+ " batch_size=32,\n",
+ " class_mode=\"categorical\",\n",
+ " subset=\"validation\"\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "75c3bab0-205b-46f8-a323-0547294c2945",
+ "metadata": {},
+ "source": [
+ "6. PyTorch (ImageFolder + DataLoader)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b16988fd-7a73-4877-a71d-b646039d13ea",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from torchvision import datasets, transforms\n",
+ "from torch.utils.data import random_split, DataLoader\n",
+ "\n",
+ "transform = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor()])\n",
+ "dataset = datasets.ImageFolder(\"dataset\", transform=transform)\n",
+ "\n",
+ "train_size = int(0.8 * len(dataset))\n",
+ "val_size = len(dataset) - train_size\n",
+ "train_data, val_data = random_split(dataset, [train_size, val_size])\n",
+ "\n",
+ "train_loader = DataLoader(train_data, batch_size=32, shuffle=True)\n",
+ "val_loader = DataLoader(val_data, batch_size=32, shuffle=False)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
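
For orientation, a minimal sketch of how the loaders built in this notebook are typically consumed, assuming the train_loader, test_loader and dataset objects from the first PyTorch cell are in scope; the small Sequential model below is a placeholder for illustration, not something shipped in the package.

import torch
import torch.nn as nn

# Assumes train_loader, test_loader and dataset come from the first PyTorch cell above.
num_classes = len(dataset.classes)

# Placeholder model (assumption): a deliberately tiny CNN just to show the plumbing.
model = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(2),                       # 224x224 -> 112x112
    nn.Flatten(),
    nn.Linear(16 * 112 * 112, num_classes),
)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(3):                     # a few epochs, demo only
    model.train()
    for images, labels in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()

# Simple accuracy check on the held-out split.
model.eval()
correct = total = 0
with torch.no_grad():
    for images, labels in test_loader:
        preds = model(images).argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print(f"test accuracy: {correct / total:.3f}")

The same loop works unchanged with a pretrained torchvision model in place of the Sequential, since the first cell's transform already resizes to 224x224 and applies ImageNet normalization.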