noshot 12.0.0__py3-none-any.whl → 14.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex1/input.txt +1 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex1/mapper.py +6 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex1/reducer.py +22 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex2/Weatherdataset.csv +200 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex2/mapper.py +20 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex2/reducer.py +32 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex3/BF_Map.py +11 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex3/BF_Red.py +30 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex3/bloom_filter.py +71 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex3/bloom_filter_mapper.py +71 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex3/bloom_filter_reducer.py +71 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex3/weblog.csv +100 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex4/FMA_mapper.py +14 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex4/FMA_reducer.py +14 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Ex4/Tweets.csv +92 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab/Instructions.txt +56 -0
- noshot/data/DLE FSD BDA/BDA/BDA Lab.iso +0 -0
- noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.1 DNN (Pytorch).ipynb +164 -0
- noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.2 DNN (Tensorflow).ipynb +94 -0
- noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/1.3 DNN (Image Classification).ipynb +134 -0
- noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.1 DNN vs CNN.ipynb +127 -0
- noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.2 DNN vs CNN.ipynb +123 -0
- noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/3 Bounding Boxes.ipynb +109 -0
- noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/4. FCNN (Image Segmentation).ipynb +108 -0
- noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Lab Excercise (Training DNN).ipynb +646 -0
- noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/Load-Images.ipynb +553 -0
- noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex1.ipynb +216 -0
- noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex2.ipynb +195 -0
- noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex3.ipynb +427 -0
- noshot/data/DLE FSD BDA/DLE/DLE 3 (sonic boy)/Ex4.ipynb +186 -0
- noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/DNN Ex No 1.ipynb +398 -0
- noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Ex No 1 Build in dataset.ipynb +171 -0
- noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/Exp1-Short-DL_ANN_ImageClassification.ipynb +401 -0
- noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp01/OR GATE .ipynb +8511 -0
- noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp02/Exp2-Short-DL_CNN_ImageClassification.ipynb +737 -0
- noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp03/DL-Ex3-RNN.ipynb +591 -0
- noshot/data/DLE FSD BDA/DLE/DLE 4 (senior)/Exp04/Ex no 4.ipynb +551 -0
- {noshot-12.0.0.dist-info → noshot-14.0.0.dist-info}/METADATA +1 -1
- noshot-14.0.0.dist-info/RECORD +50 -0
- noshot-12.0.0.dist-info/RECORD +0 -13
- /noshot/data/DLE FSD BDA/DLE/{1. DNN (Image Classification).ipynb → DLE 1 (Json)/1. DNN (Image Classification).ipynb} +0 -0
- /noshot/data/DLE FSD BDA/DLE/{2. DNN vs CNN.ipynb → DLE 1 (Json)/2. DNN vs CNN.ipynb} +0 -0
- /noshot/data/DLE FSD BDA/DLE/{3. CNN (Object Detecrion).ipynb → DLE 1 (Json)/3. CNN (Object Detecrion).ipynb} +0 -0
- /noshot/data/DLE FSD BDA/DLE/{4. FCN (Image Segmentaion).ipynb → DLE 1 (Json)/4. FCN (Image Segmentaion).ipynb} +0 -0
- {noshot-12.0.0.dist-info → noshot-14.0.0.dist-info}/WHEEL +0 -0
- {noshot-12.0.0.dist-info → noshot-14.0.0.dist-info}/licenses/LICENSE.txt +0 -0
- {noshot-12.0.0.dist-info → noshot-14.0.0.dist-info}/top_level.txt +0 -0
noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/2.2 DNN vs CNN.ipynb
@@ -0,0 +1,123 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e859c172-a86c-4cdf-93e3-7c24a9dd3292",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import tensorflow as tf\n",
+    "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+    "from tensorflow.keras.models import Sequential\n",
+    "from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n",
+    "from tensorflow.keras.optimizers import Adam\n",
+    "import matplotlib.pyplot as plt\n",
+    "import pandas as pd\n",
+    "\n",
+    "data_dir = \"Bean_Dataset/Bean_Dataset\"\n",
+    "img_height, img_width = 128, 128\n",
+    "batch_size = 32\n",
+    "\n",
+    "datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)\n",
+    "\n",
+    "train_gen = datagen.flow_from_directory(\n",
+    "    data_dir, target_size=(img_height, img_width),\n",
+    "    batch_size=batch_size, class_mode='categorical', subset='training'\n",
+    ")\n",
+    "\n",
+    "val_gen = datagen.flow_from_directory(\n",
+    "    data_dir, target_size=(img_height, img_width),\n",
+    "    batch_size=batch_size, class_mode='categorical', subset='validation'\n",
+    ")\n",
+    "\n",
+    "num_classes = len(train_gen.class_indices)\n",
+    "\n",
+    "dnn_model = Sequential([\n",
+    "    Flatten(input_shape=(img_height, img_width, 3)),\n",
+    "    Dense(512, activation='relu'),\n",
+    "    Dropout(0.5),\n",
+    "    Dense(256, activation='relu'),\n",
+    "    Dropout(0.5),\n",
+    "    Dense(num_classes, activation='softmax')\n",
+    "])\n",
+    "dnn_model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])\n",
+    "\n",
+    "cnn_model = Sequential([\n",
+    "    Conv2D(32, (3,3), activation='relu', input_shape=(img_height, img_width, 3)),\n",
+    "    MaxPooling2D(2,2),\n",
+    "    Conv2D(64, (3,3), activation='relu'),\n",
+    "    MaxPooling2D(2,2),\n",
+    "    Conv2D(128, (3,3), activation='relu'),\n",
+    "    MaxPooling2D(2,2),\n",
+    "    Flatten(),\n",
+    "    Dense(512, activation='relu'),\n",
+    "    Dropout(0.5),\n",
+    "    Dense(num_classes, activation='softmax')\n",
+    "])\n",
+    "cnn_model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])\n",
+    "\n",
+    "dnn_history = dnn_model.fit(train_gen, epochs=20, validation_data=val_gen)\n",
+    "cnn_history = cnn_model.fit(train_gen, epochs=20, validation_data=val_gen)\n",
+    "\n",
+    "comparison = pd.DataFrame({\n",
+    "    'Model': ['DNN', 'CNN'],\n",
+    "    'Parameters': [dnn_model.count_params(), cnn_model.count_params()],\n",
+    "    'Final Train Accuracy': [\n",
+    "        dnn_history.history['accuracy'][-1],\n",
+    "        cnn_history.history['accuracy'][-1]\n",
+    "    ],\n",
+    "    'Final Val Accuracy': [\n",
+    "        dnn_history.history['val_accuracy'][-1],\n",
+    "        cnn_history.history['val_accuracy'][-1]\n",
+    "    ]\n",
+    "})\n",
+    "\n",
+    "print(comparison)\n",
+    "\n",
+    "plt.figure(figsize=(12,5))\n",
+    "plt.subplot(1,2,1)\n",
+    "plt.plot(dnn_history.history['accuracy'], label='DNN Train')\n",
+    "plt.plot(dnn_history.history['val_accuracy'], label='DNN Val')\n",
+    "plt.plot(cnn_history.history['accuracy'], label='CNN Train')\n",
+    "plt.plot(cnn_history.history['val_accuracy'], label='CNN Val')\n",
+    "plt.title('Accuracy')\n",
+    "plt.xlabel('Epoch')\n",
+    "plt.ylabel('Accuracy')\n",
+    "plt.legend()\n",
+    "\n",
+    "plt.subplot(1,2,2)\n",
+    "plt.plot(dnn_history.history['loss'], label='DNN Train')\n",
+    "plt.plot(dnn_history.history['val_loss'], label='DNN Val')\n",
+    "plt.plot(cnn_history.history['loss'], label='CNN Train')\n",
+    "plt.plot(cnn_history.history['val_loss'], label='CNN Val')\n",
+    "plt.title('Loss')\n",
+    "plt.xlabel('Epoch')\n",
+    "plt.ylabel('Loss')\n",
+    "plt.legend()\n",
+    "plt.show()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/3 Bounding Boxes.ipynb
@@ -0,0 +1,109 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1c5ce2ae-0abb-45b4-a94a-7548d9af6b6a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import torchvision\n",
+    "from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n",
+    "from torchvision.datasets import VOCDetection\n",
+    "from torch.utils.data import DataLoader\n",
+    "import torchvision.transforms as T\n",
+    "import matplotlib.pyplot as plt\n",
+    "import matplotlib.patches as patches\n",
+    "\n",
+    "classes = [\"__background__\", \"apple\", \"banana\", \"orange\"]\n",
+    "\n",
+    "def get_model(num_classes):\n",
+    "    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights=\"DEFAULT\")\n",
+    "    in_features = model.roi_heads.box_predictor.cls_score.in_features\n",
+    "    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n",
+    "    return model\n",
+    "\n",
+    "def collate_fn(batch):\n",
+    "    return tuple(zip(*batch))\n",
+    "\n",
+    "transform = T.Compose([T.ToTensor()])\n",
+    "\n",
+    "def target_transform(target):\n",
+    "    objs = target[\"annotation\"][\"object\"]\n",
+    "    if not isinstance(objs, list):\n",
+    "        objs = [objs]\n",
+    "    boxes = []\n",
+    "    labels = []\n",
+    "    for obj in objs:\n",
+    "        name = obj[\"name\"]\n",
+    "        bbox = obj[\"bndbox\"]\n",
+    "        xmin, ymin, xmax, ymax = int(bbox[\"xmin\"]), int(bbox[\"ymin\"]), int(bbox[\"xmax\"]), int(bbox[\"ymax\"])\n",
+    "        boxes.append([xmin, ymin, xmax, ymax])\n",
+    "        labels.append(classes.index(name) if name in classes else 0)\n",
+    "    return {\"boxes\": torch.tensor(boxes, dtype=torch.float32), \"labels\": torch.tensor(labels, dtype=torch.int64)}\n",
+    "\n",
+    "train_dataset = VOCDetection(\"./\", year=\"2012\", image_set=\"train\", download=True, transform=transform, target_transform=target_transform)\n",
+    "train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True, collate_fn=collate_fn)\n",
+    "\n",
+    "model = get_model(len(classes))\n",
+    "device = torch.device(\"cpu\")\n",
+    "model.to(device)\n",
+    "\n",
+    "params = [p for p in model.parameters() if p.requires_grad]\n",
+    "optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\n",
+    "\n",
+    "num_epochs = 1\n",
+    "for epoch in range(num_epochs):\n",
+    "    model.train()\n",
+    "    for images, targets in train_loader:\n",
+    "        images = [img.to(device) for img in images]\n",
+    "        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n",
+    "        loss_dict = model(images, targets)\n",
+    "        losses = sum(loss for loss in loss_dict.values())\n",
+    "        optimizer.zero_grad()\n",
+    "        losses.backward()\n",
+    "        optimizer.step()\n",
+    "    print(f\"Epoch {epoch+1}, Loss: {losses.item():.4f}\")\n",
+    "\n",
+    "model.eval()\n",
+    "images, _ = next(iter(train_loader))\n",
+    "img = images[0].to(device)\n",
+    "with torch.no_grad():\n",
+    "    prediction = model([img])\n",
+    "\n",
+    "img_np = img.permute(1, 2, 0).numpy()\n",
+    "fig, ax = plt.subplots(1)\n",
+    "ax.imshow(img_np)\n",
+    "for box, label, score in zip(prediction[0][\"boxes\"], prediction[0][\"labels\"], prediction[0][\"scores\"]):\n",
+    "    if score > 0.5:\n",
+    "        xmin, ymin, xmax, ymax = box\n",
+    "        rect = patches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, linewidth=2, edgecolor='r', facecolor='none')\n",
+    "        ax.add_patch(rect)\n",
+    "        ax.text(xmin, ymin, classes[label], bbox=dict(facecolor='yellow', alpha=0.5))\n",
+    "plt.show()\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
noshot/data/DLE FSD BDA/DLE/DLE 2 (tim stan s)/4. FCNN (Image Segmentation).ipynb
@@ -0,0 +1,108 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b417a942-8300-4101-9851-d65880f3bbb4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import tensorflow as tf\n",
+    "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+    "from tensorflow.keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, Input\n",
+    "from tensorflow.keras.models import Model\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "\n",
+    "image_dir = \"image path\"\n",
+    "mask_dir = \"mask path\"\n",
+    "\n",
+    "image_datagen = ImageDataGenerator(rescale=1./255)\n",
+    "mask_datagen = ImageDataGenerator(rescale=1./255)\n",
+    "\n",
+    "image_generator = image_datagen.flow_from_directory(\n",
+    "    image_dir,\n",
+    "    class_mode=None,\n",
+    "    color_mode='rgb',\n",
+    "    target_size=(128, 128),\n",
+    "    batch_size=32,\n",
+    "    seed=42\n",
+    ")\n",
+    "\n",
+    "mask_generator = mask_datagen.flow_from_directory(\n",
+    "    mask_dir,\n",
+    "    class_mode=None,\n",
+    "    color_mode='grayscale',\n",
+    "    target_size=(128, 128),\n",
+    "    batch_size=32,\n",
+    "    seed=42\n",
+    ")\n",
+    "\n",
+    "def build_fcnn():\n",
+    "    inputs = Input((128, 128, 3))\n",
+    "    conv1 = Conv2D(128, (3, 3), activation='relu', padding='same')(inputs)\n",
+    "    pool1 = MaxPooling2D((2, 2))(conv1)\n",
+    "\n",
+    "    conv2 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool1)\n",
+    "    pool2 = MaxPooling2D((2, 2))(conv2)\n",
+    "\n",
+    "    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool2)\n",
+    "    up1 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv3)\n",
+    "\n",
+    "    conv4 = Conv2D(128, (3, 3), activation='relu', padding='same')(up1)\n",
+    "    up2 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv4)\n",
+    "\n",
+    "    outputs = Conv2D(1, (1, 1), activation='sigmoid', padding='same')(up2)\n",
+    "    return Model(inputs, outputs)\n",
+    "\n",
+    "model = build_fcnn()\n",
+    "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n",
+    "model.summary()\n",
+    "\n",
+    "def combined_generator(image_gen, mask_gen):\n",
+    "    while True:\n",
+    "        img_batch = next(image_gen)\n",
+    "        mask_batch = next(mask_gen)\n",
+    "        yield img_batch, mask_batch\n",
+    "\n",
+    "train_generator = combined_generator(image_generator, mask_generator)\n",
+    "\n",
+    "model.fit(train_generator, steps_per_epoch=len(image_generator), epochs=50)\n",
+    "\n",
+    "sample_image = image_generator[0][0][0]\n",
+    "predicted_mask = model.predict(np.expand_dims(sample_image, axis=0))[0]\n",
+    "\n",
+    "plt.figure(figsize=(10, 5))\n",
+    "plt.subplot(1, 2, 1)\n",
+    "plt.title(\"Original Image\")\n",
+    "plt.imshow(sample_image)\n",
+    "\n",
+    "plt.subplot(1, 2, 2)\n",
+    "plt.title(\"Predicted Mask\")\n",
+    "plt.imshow(predicted_mask.squeeze(), cmap='gray')\n",
+    "plt.show()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}