noshot 5.0.0-py3-none-any.whl → 7.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. noshot/data/ML TS XAI/ML/Rolls Royce AllinOne.ipynb +691 -0
  2. noshot/data/ML TS XAI/ML/Tamilan Code/1. EDA-PCA (Balance Scale Dataset).ipynb +147 -0
  3. noshot/data/ML TS XAI/ML/Tamilan Code/1. EDA-PCA (Rice Dataset).ipynb +181 -0
  4. noshot/data/ML TS XAI/ML/Tamilan Code/10. HMM Veterbi.ipynb +152 -0
  5. noshot/data/ML TS XAI/ML/Tamilan Code/2. KNN (Balance Scale Dataset).ipynb +117 -0
  6. noshot/data/ML TS XAI/ML/Tamilan Code/2. KNN (Iris Dataset).ipynb +156 -0
  7. noshot/data/ML TS XAI/ML/Tamilan Code/2. KNN (Sobar-72 Dataset).ipynb +215 -0
  8. noshot/data/ML TS XAI/ML/Tamilan Code/3. LDA (Balance Scale Dataset).ipynb +78 -0
  9. noshot/data/ML TS XAI/ML/Tamilan Code/3. LDA (NPHA Doctor Visits Dataset).ipynb +114 -0
  10. noshot/data/ML TS XAI/ML/Tamilan Code/4. Linear Regression (Machine Dataset).ipynb +115 -0
  11. noshot/data/ML TS XAI/ML/Tamilan Code/4. Linear Regression (Real Estate Dataset).ipynb +146 -0
  12. noshot/data/ML TS XAI/ML/Tamilan Code/5. Logistic Regression (Magic04 Dataset).ipynb +130 -0
  13. noshot/data/ML TS XAI/ML/Tamilan Code/5. Logistic Regression (Wine Dataset).ipynb +112 -0
  14. noshot/data/ML TS XAI/ML/Tamilan Code/6. Naive Bayes Classifier (Agaricus Lepiota Dataset).ipynb +118 -0
  15. noshot/data/ML TS XAI/ML/Tamilan Code/6. Naive Bayes Classifier (Wine Dataset).ipynb +89 -0
  16. noshot/data/ML TS XAI/ML/Tamilan Code/7. SVM (Rice Dataset).ipynb +120 -0
  17. noshot/data/ML TS XAI/ML/Tamilan Code/8. FeedForward NN (Sobar72 Dataset).ipynb +262 -0
  18. noshot/data/ML TS XAI/ML/Tamilan Code/9. CNN (Cifar10 Dataset).ipynb +156 -0
  19. noshot/data/ML TS XAI/ML/Whitefang Code/1. PCA.ipynb +162 -0
  20. noshot/data/ML TS XAI/ML/Whitefang Code/10. CNN.ipynb +100 -0
  21. noshot/data/ML TS XAI/ML/Whitefang Code/11. HMM.ipynb +336 -0
  22. noshot/data/ML TS XAI/ML/Whitefang Code/2. KNN.ipynb +149 -0
  23. noshot/data/ML TS XAI/ML/Whitefang Code/3. LDA.ipynb +132 -0
  24. noshot/data/ML TS XAI/ML/Whitefang Code/4. Linear Regression.ipynb +86 -0
  25. noshot/data/ML TS XAI/ML/Whitefang Code/5. Logistic Regression.ipynb +115 -0
  26. noshot/data/ML TS XAI/ML/Whitefang Code/6. Naive Bayes (Titanic).ipynb +196 -0
  27. noshot/data/ML TS XAI/ML/Whitefang Code/6. Naive Bayes (Wine).ipynb +98 -0
  28. noshot/data/ML TS XAI/ML/Whitefang Code/7. SVM Linear.ipynb +109 -0
  29. noshot/data/ML TS XAI/ML/Whitefang Code/8. SVM Non-Linear.ipynb +195 -0
  30. noshot/data/ML TS XAI/ML/Whitefang Code/9. FNN With Regularization.ipynb +189 -0
  31. noshot/data/ML TS XAI/ML/Whitefang Code/9. FNN Without Regularization.ipynb +197 -0
  32. noshot/data/ML TS XAI/ML/Whitefang Code/All in One Lab CIA 1 Q.ipynb +1087 -0
  33. {noshot-5.0.0.dist-info → noshot-7.0.0.dist-info}/METADATA +1 -1
  34. noshot-7.0.0.dist-info/RECORD +41 -0
  35. {noshot-5.0.0.dist-info → noshot-7.0.0.dist-info}/WHEEL +1 -1
  36. noshot/data/ML TS XAI/XAI/Q1.ipynb +0 -535
  37. noshot/data/ML TS XAI/XAI/Q2.ipynb +0 -38129
  38. noshot/data/ML TS XAI/XAI/Q3.ipynb +0 -1340
  39. noshot/data/ML TS XAI/XAI/Q4.ipynb +0 -246
  40. noshot/data/ML TS XAI/XAI/Q5.ipynb +0 -2450
  41. noshot-5.0.0.dist-info/RECORD +0 -14
  42. {noshot-5.0.0.dist-info → noshot-7.0.0.dist-info}/licenses/LICENSE.txt +0 -0
  43. {noshot-5.0.0.dist-info → noshot-7.0.0.dist-info}/top_level.txt +0 -0
noshot/data/ML TS XAI/ML/Whitefang Code/7. SVM Linear.ipynb
@@ -0,0 +1,109 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "executionInfo": {
+ "elapsed": 14185,
+ "status": "ok",
+ "timestamp": 1743393909812,
+ "user": {
+ "displayName": "Jaison A",
+ "userId": "07006398627763032071"
+ },
+ "user_tz": -330
+ },
+ "id": "TLczBA7kx_ck",
+ "outputId": "062f709d-6dab-4287-b47a-75e93d3aa086"
+ },
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn.preprocessing import StandardScaler\n",
+ "from sklearn.svm import SVC\n",
+ "from sklearn.decomposition import PCA\n",
+ "from sklearn.metrics import classification_report, accuracy_score, confusion_matrix, ConfusionMatrixDisplay\n",
+ "\n",
+ "df = pd.read_csv('/content/heart_disease_uci.csv')\n",
+ "print(df.isnull().sum())\n",
+ "\n",
+ "for col in df.columns:\n",
+ "    if df[col].dtype == 'object':\n",
+ "        df[col].fillna(df[col].mode()[0], inplace=True)\n",
+ "    else:\n",
+ "        df[col].fillna(df[col].mean(), inplace=True)\n",
+ "\n",
+ "df = pd.get_dummies(df, drop_first=True)\n",
+ "X = df.drop('num', axis=1)\n",
+ "y = df['num']\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
+ "scaler = StandardScaler()\n",
+ "X_train = scaler.fit_transform(X_train)\n",
+ "X_test = scaler.transform(X_test)\n",
+ "\n",
+ "# Reduce to 2D using PCA\n",
+ "pca = PCA(n_components=2)\n",
+ "X_train_2d = pca.fit_transform(X_train)\n",
+ "X_test_2d = pca.transform(X_test)\n",
+ "\n",
+ "# Train SVM on 2D data\n",
+ "svm_model = SVC(kernel='linear')\n",
+ "svm_model.fit(X_train_2d, y_train)\n",
+ "y_pred = svm_model.predict(X_test_2d)\n",
+ "print(\"Classification Report:\")\n",
+ "print(classification_report(y_test, y_pred))\n",
+ "print(\"Accuracy Score:\", accuracy_score(y_test, y_pred))\n",
+ "ConfusionMatrixDisplay(confusion_matrix(y_test, y_pred)).plot()\n",
+ "plt.show()\n",
+ "\n",
+ "# Plot decision boundary\n",
+ "def plot_decision_boundary(X, y, model):\n",
+ "    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n",
+ "    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n",
+ "    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01), np.arange(y_min, y_max, 0.01))\n",
+ "    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n",
+ "    Z = Z.reshape(xx.shape)\n",
+ "    plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.coolwarm)\n",
+ "    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', marker='o', cmap=plt.cm.coolwarm)\n",
+ "    plt.xlabel('PCA Component 1')\n",
+ "    plt.ylabel('PCA Component 2')\n",
+ "    plt.title('SVM Decision Boundary (2D)')\n",
+ "    plt.show()\n",
+ "\n",
+ "plot_decision_boundary(X_train_2d, y_train, svm_model)"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
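Both SVM notebooks in this release draw the boundary with a hand-rolled meshgrid helper. For readers adapting the code, here is a minimal sketch of the same plot built on sklearn.inspection.DecisionBoundaryDisplay (available in scikit-learn 1.1+); the make_classification data is an assumption, since heart_disease_uci.csv is not bundled with the wheel.

    # Sketch only, not from the package: DecisionBoundaryDisplay replaces the
    # manual np.meshgrid/contourf steps used in the notebook above.
    import matplotlib.pyplot as plt
    from sklearn.datasets import make_classification
    from sklearn.decomposition import PCA
    from sklearn.inspection import DecisionBoundaryDisplay
    from sklearn.svm import SVC

    X, y = make_classification(n_samples=300, n_features=6, random_state=42)
    X_2d = PCA(n_components=2).fit_transform(X)  # 2D projection so the boundary is plottable
    clf = SVC(kernel='linear').fit(X_2d, y)

    disp = DecisionBoundaryDisplay.from_estimator(clf, X_2d, alpha=0.8, cmap=plt.cm.coolwarm)
    disp.ax_.scatter(X_2d[:, 0], X_2d[:, 1], c=y, edgecolors='k', cmap=plt.cm.coolwarm)
    plt.title('SVM Decision Boundary (2D)')
    plt.show()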
noshot/data/ML TS XAI/ML/Whitefang Code/8. SVM Non-Linear.ipynb
@@ -0,0 +1,195 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "4HHtR8-qzyql"
+ },
+ "source": [
+ "### **Iris Data**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 680
+ },
+ "executionInfo": {
+ "elapsed": 2075,
+ "status": "ok",
+ "timestamp": 1742277187382,
+ "user": {
+ "displayName": "Jaison A",
+ "userId": "07006398627763032071"
+ },
+ "user_tz": -330
+ },
+ "id": "RNGrFpf-z7_7",
+ "outputId": "5d8ff477-fbee-40cf-9746-4ef35b48397e"
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "from sklearn.datasets import load_iris\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn.preprocessing import StandardScaler\n",
+ "from sklearn.svm import SVC\n",
+ "from sklearn.decomposition import PCA\n",
+ "from sklearn.metrics import classification_report, accuracy_score, ConfusionMatrixDisplay\n",
+ "\n",
+ "iris = load_iris()\n",
+ "X = iris.data\n",
+ "y = iris.target\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
+ "scaler = StandardScaler()\n",
+ "X_train = scaler.fit_transform(X_train)\n",
+ "X_test = scaler.transform(X_test)\n",
+ "\n",
+ "# Reduce to 2D using PCA\n",
+ "pca = PCA(n_components=2)\n",
+ "X_train_2d = pca.fit_transform(X_train)\n",
+ "X_test_2d = pca.transform(X_test)\n",
+ "\n",
+ "# Train SVM on 2D data\n",
+ "svm_model = SVC(kernel='rbf')\n",
+ "svm_model.fit(X_train_2d, y_train)\n",
+ "y_pred = svm_model.predict(X_test_2d)\n",
+ "print(\"Classification Report:\")\n",
+ "print(classification_report(y_test, y_pred))\n",
+ "print(\"Accuracy Score:\", accuracy_score(y_test, y_pred))\n",
+ "\n",
+ "# Plot decision boundary\n",
+ "def plot_decision_boundary(X, y, model):\n",
+ "    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n",
+ "    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n",
+ "    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01), np.arange(y_min, y_max, 0.01))\n",
+ "    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n",
+ "    Z = Z.reshape(xx.shape)\n",
+ "    plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.coolwarm)\n",
+ "    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', marker='o', cmap=plt.cm.coolwarm)\n",
+ "    plt.xlabel('PCA Component 1')\n",
+ "    plt.ylabel('PCA Component 2')\n",
+ "    plt.title('SVM Decision Boundary (2D)')\n",
+ "    plt.show()\n",
+ "\n",
+ "plot_decision_boundary(X_train_2d, y_train, svm_model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "x6eBRP8vztnF"
+ },
+ "source": [
+ "### **Heart Data**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "executionInfo": {
+ "elapsed": 49651,
+ "status": "ok",
+ "timestamp": 1743412125091,
+ "user": {
+ "displayName": "Jaison A",
+ "userId": "07006398627763032071"
+ },
+ "user_tz": -330
+ },
+ "id": "7pI03vdRzYV0",
+ "outputId": "14feb814-e167-4351-a662-bb1d044531f8"
+ },
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn.preprocessing import StandardScaler\n",
+ "from sklearn.svm import SVC\n",
+ "from sklearn.decomposition import PCA\n",
+ "from sklearn.metrics import classification_report, accuracy_score, ConfusionMatrixDisplay, confusion_matrix\n",
+ "\n",
+ "df = pd.read_csv('/content/heart_disease_uci.csv')\n",
+ "for col in df.columns:\n",
+ "    if df[col].dtype == 'object':\n",
+ "        df[col].fillna(df[col].mode()[0], inplace=True)\n",
+ "    else:\n",
+ "        df[col].fillna(df[col].mean(), inplace=True)\n",
+ "df = pd.get_dummies(df, drop_first=True)\n",
+ "X = df.drop('num', axis=1)\n",
+ "y = df['num']\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
+ "scaler = StandardScaler()\n",
+ "X_train = scaler.fit_transform(X_train)\n",
+ "X_test = scaler.transform(X_test)\n",
+ "\n",
+ "# Reduce to 2D using PCA\n",
+ "pca = PCA(n_components=2)\n",
+ "X_train_2d = pca.fit_transform(X_train)\n",
+ "X_test_2d = pca.transform(X_test)\n",
+ "\n",
+ "# Train SVM on 2D data\n",
+ "svm_model = SVC(kernel='rbf')\n",
+ "svm_model.fit(X_train_2d, y_train)\n",
+ "y_pred = svm_model.predict(X_test_2d)\n",
+ "print(\"Classification Report:\")\n",
+ "print(classification_report(y_test, y_pred))\n",
+ "print(\"Accuracy Score:\", accuracy_score(y_test, y_pred))\n",
+ "ConfusionMatrixDisplay(confusion_matrix(y_test, y_pred)).plot()\n",
+ "plt.show()\n",
+ "\n",
+ "# Plot decision boundary\n",
+ "def plot_decision_boundary(X, y, model):\n",
+ "    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n",
+ "    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n",
+ "    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01), np.arange(y_min, y_max, 0.01))\n",
+ "    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n",
+ "    Z = Z.reshape(xx.shape)\n",
+ "    plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.coolwarm)\n",
+ "    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', marker='o', cmap=plt.cm.coolwarm)\n",
+ "    plt.xlabel('PCA Component 1')\n",
+ "    plt.ylabel('PCA Component 2')\n",
+ "    plt.title('SVM Decision Boundary (2D)')\n",
+ "    plt.show()\n",
+ "\n",
+ "plot_decision_boundary(X_train_2d, y_train, svm_model)"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "authorship_tag": "ABX9TyMpHE13jXi9Fv5ZNpyOjg3b",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
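The non-linear notebook fixes SVC(kernel='rbf') at its default C and gamma. A hedged sketch of how the same iris pipeline could tune those two hyperparameters with GridSearchCV; the parameter grid below is illustrative, not taken from the notebooks.

    # Sketch only: cross-validated search over the RBF kernel's C and gamma.
    from sklearn.datasets import load_iris
    from sklearn.model_selection import GridSearchCV, train_test_split
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import SVC

    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    param_grid = {'C': [0.1, 1, 10, 100], 'gamma': ['scale', 0.01, 0.1, 1]}
    search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=5)
    search.fit(X_train, y_train)
    print("Best params:", search.best_params_)
    print("Test accuracy:", search.score(X_test, y_test))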
noshot/data/ML TS XAI/ML/Whitefang Code/9. FNN With Regularization.ipynb
@@ -0,0 +1,189 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "executionInfo": {
+ "elapsed": 45818,
+ "status": "ok",
+ "timestamp": 1743487279036,
+ "user": {
+ "displayName": "Jaison A",
+ "userId": "07006398627763032071"
+ },
+ "user_tz": -330
+ },
+ "id": "TNO6tqQ977Mr",
+ "outputId": "67fbc432-5c26-4e05-8928-06c959724944"
+ },
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "from sklearn.neural_network import MLPClassifier\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n",
+ "from sklearn.preprocessing import StandardScaler, LabelEncoder\n",
+ "from sklearn.impute import SimpleImputer\n",
+ "\n",
+ "# Load and preprocess data\n",
+ "df = pd.read_csv('/content/heart_disease_uci.csv')\n",
+ "\n",
+ "# Data preprocessing\n",
+ "numeric_cols = ['age', 'trestbps', 'chol', 'thalch', 'oldpeak']\n",
+ "categorical_cols = ['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal']\n",
+ "\n",
+ "# Impute missing values\n",
+ "imputer_num = SimpleImputer(strategy='median')\n",
+ "df[numeric_cols] = imputer_num.fit_transform(df[numeric_cols])\n",
+ "\n",
+ "imputer_cat = SimpleImputer(strategy='most_frequent')\n",
+ "df[categorical_cols] = imputer_cat.fit_transform(df[categorical_cols])\n",
+ "\n",
+ "# Encode categorical variables\n",
+ "for col in categorical_cols:\n",
+ "    le = LabelEncoder()\n",
+ "    df[col] = le.fit_transform(df[col].astype(str))\n",
+ "\n",
+ "# Target variable\n",
+ "df['target'] = (df['num'] > 0).astype(int)\n",
+ "\n",
+ "# Select features and target\n",
+ "features = numeric_cols + categorical_cols\n",
+ "X = df[features]\n",
+ "y = df['target']\n",
+ "\n",
+ "# Split data\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n",
+ "\n",
+ "# Standardize features\n",
+ "scaler = StandardScaler()\n",
+ "X_train = scaler.fit_transform(X_train)\n",
+ "X_test = scaler.transform(X_test)\n",
+ "\n",
+ "## Experiment 10: Regularization Techniques\n",
+ "\n",
+ "# Best architecture from Experiment 9\n",
+ "best_architecture = (20, 10)\n",
+ "\n",
+ "def evaluate_regularized_model(regularization_params):\n",
+ "    # Remove 'name' key if present\n",
+ "    params = {k: v for k, v in regularization_params.items() if k != \"name\"}\n",
+ "\n",
+ "    # For dropout simulation, we'll adjust alpha instead\n",
+ "    if \"dropout\" in params:\n",
+ "        # In scikit-learn, we can simulate dropout effect by increasing alpha\n",
+ "        params[\"alpha\"] = params.pop(\"dropout\") * 10  # Scaling factor\n",
+ "        params[\"solver\"] = \"adam\"  # Adam works better with this approach\n",
+ "\n",
+ "    model = MLPClassifier(hidden_layer_sizes=best_architecture,\n",
+ "                          activation='relu',\n",
+ "                          max_iter=500,\n",
+ "                          random_state=42,\n",
+ "                          **params)\n",
+ "    model.fit(X_train, y_train)\n",
+ "    y_pred = model.predict(X_test)\n",
+ "    accuracy = accuracy_score(y_test, y_pred)\n",
+ "    return accuracy, model.loss_curve_\n",
+ "\n",
+ "# Test different regularization techniques\n",
+ "regularization_tests = [\n",
+ "    {\"name\": \"No Regularization\", \"alpha\": 0},\n",
+ "    {\"alpha\": 0.0001, \"name\": \"L2 (alpha=0.0001)\"},\n",
+ "    {\"alpha\": 0.001, \"name\": \"L2 (alpha=0.001)\"},\n",
+ "    {\"alpha\": 0.01, \"name\": \"L2 (alpha=0.01)\"},\n",
+ "    {\"early_stopping\": True, \"validation_fraction\": 0.2, \"name\": \"Early Stopping\"},\n",
+ "    {\"alpha\": 0.001, \"early_stopping\": True, \"name\": \"L2 + Early Stopping\"},\n",
+ "    {\"dropout\": 0.2, \"name\": \"Simulated Dropout (alpha=2)\"},\n",
+ "    {\"alpha\": 0.001, \"early_stopping\": True, \"solver\": \"adam\", \"name\": \"L2 + Early Stop + Adam\"}\n",
+ "]\n",
+ "\n",
+ "results = []\n",
+ "loss_curves = []\n",
+ "\n",
+ "for reg_test in regularization_tests:\n",
+ "    accuracy, loss_curve = evaluate_regularized_model(reg_test)\n",
+ "    results.append({\n",
+ "        \"Regularization\": reg_test[\"name\"],\n",
+ "        \"Accuracy\": accuracy\n",
+ "    })\n",
+ "    loss_curves.append((reg_test[\"name\"], loss_curve))\n",
+ "\n",
+ "    print(f\"\\nRegularization: {reg_test['name']}\")\n",
+ "    print(\"Classification Report:\")\n",
+ "    model_params = {k: v for k, v in reg_test.items() if k not in [\"name\", \"dropout\"]}\n",
+ "    if \"dropout\" in reg_test:\n",
+ "        model_params[\"alpha\"] = reg_test[\"dropout\"] * 10\n",
+ "    model = MLPClassifier(hidden_layer_sizes=best_architecture,\n",
+ "                          activation='relu',\n",
+ "                          max_iter=500,\n",
+ "                          random_state=42,\n",
+ "                          **model_params)\n",
+ "    model.fit(X_train, y_train)\n",
+ "    y_pred = model.predict(X_test)\n",
+ "    print(classification_report(y_test, y_pred))\n",
+ "    print(\"Confusion Matrix:\")\n",
+ "    print(confusion_matrix(y_test, y_pred))\n",
+ "\n",
+ "# Display results\n",
+ "results_df = pd.DataFrame(results)\n",
+ "print(\"\\nSummary of Regularization Results:\")\n",
+ "print(results_df.sort_values(by=\"Accuracy\", ascending=False))\n",
+ "\n",
+ "# Visualization\n",
+ "plt.figure(figsize=(14, 6))\n",
+ "\n",
+ "# Accuracy comparison\n",
+ "plt.subplot(1, 2, 1)\n",
+ "plt.barh(results_df['Regularization'], results_df['Accuracy'], color='lightgreen')\n",
+ "plt.title('Test Accuracy by Regularization Technique')\n",
+ "plt.xlabel('Accuracy')\n",
+ "plt.xlim(0.7, 0.9)\n",
+ "\n",
+ "# Training loss curves\n",
+ "plt.subplot(1, 2, 2)\n",
+ "for name, curve in loss_curves:\n",
+ "    plt.plot(curve, label=name)\n",
+ "plt.title('Training Loss with Regularization')\n",
+ "plt.xlabel('Iterations')\n",
+ "plt.ylabel('Loss')\n",
+ "plt.legend()\n",
+ "plt.grid()\n",
+ "\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "authorship_tag": "ABX9TyNefA49SOjY7QnPMPjmtmQE",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
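A caveat on the "Simulated Dropout" entry above: MLPClassifier has no dropout layer, and alpha is an L2 penalty, so scaling alpha by 10x the dropout rate is a heuristic, not an equivalence. Below is a minimal sketch (on synthetic make_classification data, an assumption since the CSV is not bundled) that measures the effect of alpha directly with validation_curve.

    # Sketch only: sweep the L2 penalty and compare train vs. validation accuracy.
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.model_selection import validation_curve
    from sklearn.neural_network import MLPClassifier

    X, y = make_classification(n_samples=500, n_features=13, random_state=42)
    alphas = np.logspace(-4, 1, 6)
    train_scores, val_scores = validation_curve(
        MLPClassifier(hidden_layer_sizes=(20, 10), max_iter=500, random_state=42),
        X, y, param_name='alpha', param_range=alphas, cv=5)
    for a, tr, va in zip(alphas, train_scores.mean(axis=1), val_scores.mean(axis=1)):
        print(f"alpha={a:.4f}  train={tr:.3f}  val={va:.3f}")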