noshot 1.0.0__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. noshot/data/ML TS XAI/TS/bill-charge.ipynb +239 -0
  2. noshot/data/ML TS XAI/{XAI/XAI 2/Exp-3 (EDA-loan).ipynb → TS/daily-min-temperatures.ipynb} +68 -50
  3. noshot/data/ML TS XAI/TS/data/bill-data.csv +21 -0
  4. noshot/data/ML TS XAI/TS/data/daily-min-temperatures.csv +3651 -0
  5. noshot/data/ML TS XAI/TS/data/monthly-sunspots.csv +2821 -0
  6. noshot/data/ML TS XAI/TS/monthly-sunspots.ipynb +241 -0
  7. {noshot-1.0.0.dist-info → noshot-2.0.0.dist-info}/METADATA +1 -1
  8. noshot-2.0.0.dist-info/RECORD +15 -0
  9. {noshot-1.0.0.dist-info → noshot-2.0.0.dist-info}/WHEEL +1 -1
  10. noshot/data/ML TS XAI/TS/10. Seasonal ARIMA Forecasting.ipynb +0 -246
  11. noshot/data/ML TS XAI/TS/11. Multivariate ARIMA Forecasting.ipynb +0 -228
  12. noshot/data/ML TS XAI/TS/6. ACF PACF.ipynb +0 -77
  13. noshot/data/ML TS XAI/TS/7. Differencing.ipynb +0 -167
  14. noshot/data/ML TS XAI/TS/8. ARMA Forecasting.ipynb +0 -197
  15. noshot/data/ML TS XAI/TS/9. ARIMA Forecasting.ipynb +0 -220
  16. noshot/data/ML TS XAI/XAI/XAI 1/EDA2_chipsdatset.ipynb +0 -633
  17. noshot/data/ML TS XAI/XAI/XAI 1/EDA_IRISH_8thjan.ipynb +0 -326
  18. noshot/data/ML TS XAI/XAI/XAI 1/XAI_EX1 MODEL BIAS (FINAL).ipynb +0 -487
  19. noshot/data/ML TS XAI/XAI/XAI 1/complete_guide_to_eda_on_text_data.ipynb +0 -845
  20. noshot/data/ML TS XAI/XAI/XAI 1/deepchecksframeworks.ipynb +0 -100
  21. noshot/data/ML TS XAI/XAI/XAI 1/deepexplainers (mnist).ipynb +0 -90
  22. noshot/data/ML TS XAI/XAI/XAI 1/guidedbackpropagation.ipynb +0 -203
  23. noshot/data/ML TS XAI/XAI/XAI 1/updated_image_EDA1_with_LRP.ipynb +0 -3998
  24. noshot/data/ML TS XAI/XAI/XAI 1/zebrastripes.ipynb +0 -271
  25. noshot/data/ML TS XAI/XAI/XAI 2/EXP_5.ipynb +0 -1545
  26. noshot/data/ML TS XAI/XAI/XAI 2/Exp-3 (EDA-movie).ipynb +0 -229
  27. noshot/data/ML TS XAI/XAI/XAI 2/Exp-4(Flower dataset).ipynb +0 -237
  28. noshot/data/ML TS XAI/XAI/XAI 2/Exp-4.ipynb +0 -241
  29. noshot/data/ML TS XAI/XAI/XAI 2/Exp_2.ipynb +0 -352
  30. noshot/data/ML TS XAI/XAI/XAI 2/Exp_7.ipynb +0 -110
  31. noshot/data/ML TS XAI/XAI/XAI 2/FeatureImportance_SensitivityAnalysis.ipynb +0 -708
  32. noshot-1.0.0.dist-info/RECORD +0 -32
  33. {noshot-1.0.0.dist-info → noshot-2.0.0.dist-info}/licenses/LICENSE.txt +0 -0
  34. {noshot-1.0.0.dist-info → noshot-2.0.0.dist-info}/top_level.txt +0 -0
noshot/data/ML TS XAI/XAI/XAI 2/Exp-3 (EDA-movie).ipynb (deleted)
@@ -1,229 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "ccf7df96",
- "metadata": {},
- "outputs": [],
- "source": [
- "import pandas as pd \n",
- "import numpy as np\n",
- "import matplotlib.pyplot as plt\n",
- "import seaborn as sns"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "05fcafeb",
- "metadata": {},
- "outputs": [],
- "source": [
- "auto=pd.read_csv(\"IMDB-Movie-Data.csv\")\n",
- "#auto.reset_index(inplace=True)\n",
- "auto.head()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "774d8ecf",
- "metadata": {},
- "outputs": [],
- "source": [
- "auto.head()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "66bef117",
- "metadata": {},
- "outputs": [],
- "source": [
- "auto.tail()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "d12303ac",
- "metadata": {},
- "outputs": [],
- "source": [
- "auto.info()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "13ca9a52",
- "metadata": {},
- "outputs": [],
- "source": [
- "auto.nunique()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "406b67f4",
- "metadata": {
- "scrolled": true
- },
- "outputs": [],
- "source": [
- "auto.isnull()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "eaa5f031",
- "metadata": {},
- "outputs": [],
- "source": [
- "auto.describe()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "214537c9",
- "metadata": {},
- "outputs": [],
- "source": [
- "data = auto.drop(['index'], axis = 1)\n",
- "data"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "9e22f6d6",
- "metadata": {
- "scrolled": true
- },
- "outputs": [],
- "source": [
- "print(auto.Title.unique())"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "95a9706b",
- "metadata": {},
- "outputs": [],
- "source": [
- "print(auto.Title.nunique())"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "0c895e16",
- "metadata": {
- "scrolled": true
- },
- "outputs": [],
- "source": [
- "au=auto.head(50)\n",
- "au"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "ff300c7a",
- "metadata": {
- "scrolled": true
- },
- "outputs": [],
- "source": [
- "plt.title(\"MOVIES REVENUES\")\n",
- "plt.xlabel(\"Title\")\n",
- "plt.ylabel(\"Revenue (Millions)\")\n",
- "plt.plot(au['Title'],au['Revenue (Millions)'],color=\"blue\")\n",
- "plt.show(10,20)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "f53a7f1d",
- "metadata": {},
- "outputs": [],
- "source": [
- "plt.title(\"MOVIES REVENUES\")\n",
- "plt.xlabel(\"Title\")\n",
- "plt.ylabel(\"Revenue (Millions)\")\n",
- "plt.bar(au['Title'],au['Revenue (Millions)'],color=\"green\")\n",
- "plt.show()\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "f0625d78",
- "metadata": {},
- "outputs": [],
- "source": [
- "sns.pairplot(au)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "88d11bbd",
- "metadata": {},
- "outputs": [],
- "source": [
- "plt.hist(au['Revenue (Millions)'],bins=10,color='cyan')\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "bfc6edb5",
- "metadata": {},
- "outputs": [],
- "source": [
- "plt.figure(figsize=(12, 7))\n",
- "sns.heatmap(au.corr(), annot = True, vmin = -1, vmax = 1)\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "adc44fdc-323a-4b9f-9ec2-1227ef925241",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.12.4"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
- }
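Note on the deleted Exp-3 (EDA-movie) notebook above: three of its cells do not run as written. `plt.show(10,20)` raises a TypeError, since matplotlib's `pyplot.show()` accepts no size arguments; `auto.drop(['index'], axis = 1)` raises a KeyError unless the commented-out `reset_index()` call is re-enabled, because the CSV has no 'index' column; and on pandas 2.x, `au.corr()` raises on the frame's string columns unless `numeric_only=True` is passed. A minimal corrected sketch of the line-plot cell, assuming the same IMDB-Movie-Data.csv layout:

    import pandas as pd
    import matplotlib.pyplot as plt

    auto = pd.read_csv("IMDB-Movie-Data.csv")
    au = auto.head(50)  # first 50 titles, as in the deleted cell

    plt.figure(figsize=(10, 20))  # the figure size belongs here, not in plt.show()
    plt.title("MOVIES REVENUES")
    plt.xlabel("Title")
    plt.ylabel("Revenue (Millions)")
    plt.plot(au['Title'], au['Revenue (Millions)'], color="blue")
    plt.show()  # plt.show() takes no positional arguments

The heatmap cell would likewise need `sns.heatmap(au.corr(numeric_only=True), ...)` on current pandas.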
noshot/data/ML TS XAI/XAI/XAI 2/Exp-4(Flower dataset).ipynb (deleted)
@@ -1,237 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "9c6bb3ac-e1f5-4b1e-8944-9fd6d95c5cf9",
- "metadata": {},
- "outputs": [],
- "source": [
- "import torch \n",
- "import torch.nn as nn \n",
- "import torch.optim as optim\n",
- "import torch.nn.functional as F\n",
- "import torchvision \n",
- "import torchvision.transforms as transforms\n",
- "import numpy as np\n",
- "import matplotlib.pyplot as plt \n",
- "from sklearn.tree import DecisionTreeClassifier\n",
- "from sklearn.tree import plot_tree"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "11e04672-e757-459c-99d8-b100bf4e8a2a",
- "metadata": {},
- "outputs": [],
- "source": [
- "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\n",
- "trainset = torchvision.datasets.MNIST(root='126156009/17flowerdataset.zip', train=True, download=True, transform=transform)\n",
- "trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n",
- "\n",
- "testset = torchvision.datasets.MNIST(root='126156009/17flowerdataset.zip', train=False, download=True, transform=transform)\n",
- "testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "7107c76c-5ed7-43c9-a61d-4908cd06c062",
- "metadata": {},
- "outputs": [],
- "source": [
- "class CNN(nn.Module):\n",
- " def __init__(self):\n",
- " super(CNN, self).__init__()\n",
- " self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)\n",
- " self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)\n",
- " self.fc1 = nn.Linear(32 * 7 * 7, 128)\n",
- " self.fc2 = nn.Linear(128, 10)\n",
- "\n",
- " def forward(self, x):\n",
- " x = F.relu(self.conv1(x))\n",
- " x = F.max_pool2d(x, 2, 2)\n",
- " x = F.relu(self.conv2(x))\n",
- " x = F.max_pool2d(x, 2, 2)\n",
- " x = x.view(-1, 32 * 7 * 7)\n",
- " x = F.relu(self.fc1(x))\n",
- " x = self.fc2(x)\n",
- " return x\n",
- "\n",
- "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
- "model = CNN().to(device)\n",
- "criterion = nn.CrossEntropyLoss()\n",
- "optimizer = optim.Adam(model.parameters(), lr=0.001)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "9085fd94-535d-4e3d-9937-f3c5049a426d",
- "metadata": {},
- "outputs": [],
- "source": [
- "def train(model, trainloader, criterion, optimizer, epochs=3):\n",
- " model.train()\n",
- " for epoch in range(epochs):\n",
- " running_loss = 0.0\n",
- " for images, labels in trainloader:\n",
- " images, labels = images.to(device), labels.to(device)\n",
- " optimizer.zero_grad()\n",
- " outputs = model(images) \n",
- " \n",
- " print(f\"Logits for first image in batch: {outputs[0]}\") \n",
- "\n",
- " loss = criterion(outputs, labels)\n",
- " loss.backward()\n",
- " optimizer.step()\n",
- " running_loss += loss.item()\n",
- " print(f\"Epoch {epoch + 1}, Loss: {running_loss / len(trainloader):.4f}\")\n",
- "\n",
- "train(model, trainloader, criterion, optimizer)\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "27e125ef-9b2d-4768-b097-ff905f3cbcc8",
- "metadata": {},
- "outputs": [],
- "source": [
- "def extract_features(model, dataloader):\n",
- " model.eval()\n",
- " features, labels = [], []\n",
- " with torch.no_grad():\n",
- " for images, lbls in dataloader:\n",
- " images = images.to(device)\n",
- " outputs = model(images) \n",
- "\n",
- " print(f\"Logits for first image in batch: {outputs[0]}\") \n",
- "\n",
- " features.extend(outputs.cpu().numpy()) \n",
- " labels.extend(lbls.numpy())\n",
- " return np.array(features), np.array(labels)\n",
- "\n",
- "X_train, y_train = extract_features(model, trainloader)\n",
- "X_test, y_test = extract_features(model, testloader)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "6807872d-6d75-40fb-a168-21b110c0f168",
- "metadata": {},
- "outputs": [],
- "source": [
- "dt = DecisionTreeClassifier(max_depth=5) \n",
- "dt.fit(X_train, y_train)\n",
- "\n",
- "\n",
- "acc = dt.score(X_test, y_test)\n",
- "print(f\"Surrogate Model Accuracy: {acc * 100:.2f}%\")\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "d57a082e-4d32-48ed-a0bc-d8bb1e6b2e08",
- "metadata": {},
- "outputs": [],
- "source": [
- "\n",
- "def visualize_surrogate_model(dt):\n",
- " plt.figure(figsize=(20, 16))\n",
- " plot_tree(dt, filled=True, feature_names=[f\"Feature {i}\" for i in range(X_train.shape[1])], class_names=[str(i) for i in range(10)], rounded=True,fontsize=14)\n",
- " plt.title(\"Surrogate Model - Decision Tree\")\n",
- " plt.show()\n",
- "\n",
- "visualize_surrogate_model(dt)\n",
- "\n",
- "\n",
- "def plot_feature_importance(dt, feature_names):\n",
- " feature_importances = dt.feature_importances_\n",
- " indices = np.argsort(feature_importances)[::-1]\n",
- "\n",
- " plt.figure(figsize=(10, 6))\n",
- " plt.title(\"Feature Importances (Surrogate Model)\")\n",
- " plt.barh(range(X_train.shape[1]), feature_importances[indices], align=\"center\")\n",
- " plt.yticks(range(X_train.shape[1]), [f\"Feature {i}\" for i in indices])\n",
- " plt.xlabel(\"Importance\")\n",
- " plt.show()\n",
- "\n",
- "plot_feature_importance(dt, [f\"Feature {i}\" for i in range(X_train.shape[1])])\n",
- "\n",
- "\n",
- "def visualize_feature_maps(model, input_image):\n",
- " model.eval()\n",
- " layers = [model.conv1, model.conv2]\n",
- " activations = []\n",
- "\n",
- " def save_activation(name):\n",
- " def hook(model, input, output):\n",
- " activations.append(output)\n",
- " return hook\n",
- "\n",
- " \n",
- " hooks = []\n",
- " for layer in layers:\n",
- " hooks.append(layer.register_forward_hook(save_activation(layer.__class__.__name__)))\n",
- "\n",
- " \n",
- " input_image = input_image.unsqueeze(0).to(device)\n",
- " model(input_image)\n",
- "\n",
- " \n",
- " for i, activation in enumerate(activations):\n",
- " activation = activation.squeeze(0).cpu().detach().numpy()\n",
- " num_filters = activation.shape[0]\n",
- "\n",
- " \n",
- " fig, axes = plt.subplots(1, num_filters, figsize=(15, 8))\n",
- " for j in range(num_filters):\n",
- " axes[j].imshow(activation[j], cmap='gray')\n",
- " axes[j].axis('off')\n",
- " axes[j].set_title(f\"Filter {j + 1}\")\n",
- " plt.show()\n",
- "\n",
- " \n",
- " for hook in hooks:\n",
- " hook.remove()\n",
- "\n",
- "\n",
- "sample_image, sample_label = testset[0]\n",
- "visualize_feature_maps(model, sample_image)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "f6f63b48-11b5-4c46-adf8-9c504d6256c8",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.12.4"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
- }
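Two quirks in the deleted Exp-4 (Flower dataset) notebook above are worth noting. Despite its name, it never touches a flower dataset: `torchvision.datasets.MNIST(root=...)` treats `root` as a directory to download and cache MNIST into, so `root='126156009/17flowerdataset.zip'` merely creates an oddly named folder and the model trains on MNIST digits. And `plt.subplots(1, num_filters)` draws all 16 or 32 feature maps in a single row, which is unreadable at that count. A sketch of a grid layout for the same hook-captured activations (the `activation` array of shape (num_filters, H, W) is the one produced inside the notebook's `visualize_feature_maps`):

    import math
    import matplotlib.pyplot as plt

    def show_activation_grid(activation, cols=8):
        # activation: numpy array of shape (num_filters, H, W) captured by a forward hook
        num_filters = activation.shape[0]
        rows = math.ceil(num_filters / cols)
        fig, axes = plt.subplots(rows, cols, figsize=(2 * cols, 2 * rows))
        for j, ax in enumerate(axes.flat):
            ax.axis('off')  # hide axes on every tile, including unused trailing ones
            if j < num_filters:
                ax.imshow(activation[j], cmap='gray')
                ax.set_title(f"Filter {j + 1}", fontsize=8)
        plt.tight_layout()
        plt.show()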
noshot/data/ML TS XAI/XAI/XAI 2/Exp-4.ipynb (deleted)
@@ -1,241 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "a956d239-b4b0-4e65-ac1a-d8047cfc883f",
- "metadata": {},
- "outputs": [],
- "source": [
- "import torch \n",
- "import torch.nn as nn \n",
- "import torch.optim as optim\n",
- "import torch.nn.functional as F\n",
- "import torchvision \n",
- "import torchvision.transforms as transforms\n",
- "import numpy as np\n",
- "import matplotlib.pyplot as plt \n",
- "from sklearn.tree import DecisionTreeClassifier\n",
- "from sklearn.tree import plot_tree"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "7a0d4f98-bfb5-40a0-9c0e-8843361cb7a7",
- "metadata": {},
- "outputs": [],
- "source": [
- "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\n",
- "trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n",
- "trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n",
- "\n",
- "testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)\n",
- "testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "7c323e6e-58bf-498a-bc60-1e02a1aea4ef",
- "metadata": {},
- "outputs": [],
- "source": [
- "class CNN(nn.Module):\n",
- " def __init__(self):\n",
- " super(CNN, self).__init__()\n",
- " self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)\n",
- " self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)\n",
- " self.fc1 = nn.Linear(32 * 7 * 7, 128)\n",
- " self.fc2 = nn.Linear(128, 10)\n",
- "\n",
- " def forward(self, x):\n",
- " x = F.relu(self.conv1(x))\n",
- " x = F.max_pool2d(x, 2, 2)\n",
- " x = F.relu(self.conv2(x))\n",
- " x = F.max_pool2d(x, 2, 2)\n",
- " x = x.view(-1, 32 * 7 * 7)\n",
- " x = F.relu(self.fc1(x))\n",
- " x = self.fc2(x)\n",
- " return x\n",
- "\n",
- "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
- "model = CNN().to(device)\n",
- "criterion = nn.CrossEntropyLoss()\n",
- "optimizer = optim.Adam(model.parameters(), lr=0.001)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "57e73bc1-cbfb-46ca-b5d2-4798b8b59a89",
- "metadata": {},
- "outputs": [],
- "source": [
- "def train(model, trainloader, criterion, optimizer, epochs=3):\n",
- " model.train()\n",
- " for epoch in range(epochs):\n",
- " running_loss = 0.0\n",
- " for images, labels in trainloader:\n",
- " images, labels = images.to(device), labels.to(device)\n",
- " optimizer.zero_grad()\n",
- " outputs = model(images) # CNN outputs (logits)\n",
- "\n",
- " # Print CNN output (logits) for the first image in the batch\n",
- " print(f\"Logits for first image in batch: {outputs[0]}\") # First image in the batch\n",
- "\n",
- " loss = criterion(outputs, labels)\n",
- " loss.backward()\n",
- " optimizer.step()\n",
- " running_loss += loss.item()\n",
- " print(f\"Epoch {epoch + 1}, Loss: {running_loss / len(trainloader):.4f}\")\n",
- "\n",
- "train(model, trainloader, criterion, optimizer)\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "93b6189f-889e-419e-95d6-9154c2acca26",
- "metadata": {},
- "outputs": [],
- "source": [
- "def extract_features(model, dataloader):\n",
- " model.eval()\n",
- " features, labels = [], []\n",
- " with torch.no_grad():\n",
- " for images, lbls in dataloader:\n",
- " images = images.to(device)\n",
- " outputs = model(images) # CNN outputs (logits)\n",
- "\n",
- " # Print CNN outputs (logits) for the first image in each batch\n",
- " print(f\"Logits for first image in batch: {outputs[0]}\") # This will print the logits for the first image\n",
- "\n",
- " features.extend(outputs.cpu().numpy()) # Extract CNN outputs as features\n",
- " labels.extend(lbls.numpy())\n",
- " return np.array(features), np.array(labels)\n",
- "\n",
- "X_train, y_train = extract_features(model, trainloader)\n",
- "X_test, y_test = extract_features(model, testloader)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "44442993-376b-41c0-9dc9-f55cfdc0268c",
- "metadata": {},
- "outputs": [],
- "source": [
- "dt = DecisionTreeClassifier(max_depth=5) \n",
- "dt.fit(X_train, y_train)\n",
- "\n",
- "\n",
- "acc = dt.score(X_test, y_test)\n",
- "print(f\"Surrogate Model Accuracy: {acc * 100:.2f}%\")\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "79bb7a9f-b4ba-4bc1-95ef-ecd1cf525b45",
- "metadata": {},
- "outputs": [],
- "source": [
- "\n",
- "def visualize_surrogate_model(dt):\n",
- " plt.figure(figsize=(12, 8))\n",
- " plot_tree(dt, filled=True, feature_names=[f\"Feature {i}\" for i in range(X_train.shape[1])], class_names=[str(i) for i in range(10)], rounded=True)\n",
- " plt.title(\"Surrogate Model - Decision Tree\")\n",
- " plt.show()\n",
- "\n",
- "visualize_surrogate_model(dt)\n",
- "\n",
- "\n",
- "def plot_feature_importance(dt, feature_names):\n",
- " feature_importances = dt.feature_importances_\n",
- " indices = np.argsort(feature_importances)[::-1]\n",
- "\n",
- " plt.figure(figsize=(10, 6))\n",
- " plt.title(\"Feature Importances (Surrogate Model)\")\n",
- " plt.barh(range(X_train.shape[1]), feature_importances[indices], align=\"center\")\n",
- " plt.yticks(range(X_train.shape[1]), [f\"Feature {i}\" for i in indices])\n",
- " plt.xlabel(\"Importance\")\n",
- " plt.show()\n",
- "\n",
- "\n",
- "plot_feature_importance(dt, [f\"Feature {i}\" for i in range(X_train.shape[1])])\n",
- "\n",
- "\n",
- "\n",
- "def visualize_feature_maps(model, input_image):\n",
- " model.eval()\n",
- " layers = [model.conv1, model.conv2]\n",
- " activations = []\n",
- "\n",
- " def save_activation(name):\n",
- " def hook(model, input, output):\n",
- " activations.append(output)\n",
- " return hook\n",
- "\n",
- " \n",
- " hooks = []\n",
- " for layer in layers:\n",
- " hooks.append(layer.register_forward_hook(save_activation(layer.__class__.__name__)))\n",
- "\n",
- " \n",
- " input_image = input_image.unsqueeze(0).to(device)\n",
- " model(input_image)\n",
- "\n",
- " \n",
- " for i, activation in enumerate(activations):\n",
- " activation = activation.squeeze(0).cpu().detach().numpy()\n",
- " num_filters = activation.shape[0]\n",
- "\n",
- " \n",
- " fig, axes = plt.subplots(1, num_filters, figsize=(15, 8))\n",
- " for j in range(num_filters):\n",
- " axes[j].imshow(activation[j], cmap='gray')\n",
- " axes[j].axis('off')\n",
- " axes[j].set_title(f\"Filter {j + 1}\")\n",
- " plt.show()\n",
- "\n",
- " \n",
- " for hook in hooks:\n",
- " hook.remove()\n",
- "\n",
- "\n",
- "sample_image, sample_label = testset[0]\n",
- "visualize_feature_maps(model, sample_image)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "d572de18-1df8-41b8-8b24-618790d7d0aa",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.12.4"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
- }
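The deleted Exp-4.ipynb above is the cleaned-up twin of the flower-dataset variant (MNIST cached under './data', comments restored, default plot_tree sizing). One reading note applies to both surrogate notebooks: the DecisionTreeClassifier is fit on the CNN's 10 output logits, so "Feature i" in the tree and importance plots means "the logit for digit i", not a pixel or learned convolutional feature; the per-batch logit print in train() and extract_features() also floods the output with hundreds of lines per epoch and is usually dropped for readability. A sketch that labels the surrogate's inputs for what they are, assuming `X_train`/`y_train` from the notebook's `extract_features()`:

    import matplotlib.pyplot as plt
    from sklearn.tree import DecisionTreeClassifier, plot_tree

    # The surrogate's inputs are the CNN's class logits, so name them accordingly.
    feature_names = [f"logit(digit {i})" for i in range(10)]

    dt = DecisionTreeClassifier(max_depth=5, random_state=0)  # random_state added for repeatability
    dt.fit(X_train, y_train)

    plt.figure(figsize=(12, 8))
    plot_tree(dt, filled=True, feature_names=feature_names,
              class_names=[str(i) for i in range(10)], rounded=True)
    plt.title("Surrogate Model - Decision Tree")
    plt.show()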