noshot 2.0.0__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. noshot/data/ML TS XAI/ML/Main/1. EDA-PCA (Balance Scale Dataset).ipynb +139 -0
  2. noshot/data/ML TS XAI/ML/Main/1. EDA-PCA (Rice Dataset).ipynb +181 -0
  3. noshot/data/ML TS XAI/ML/Main/10. HMM Veterbi.ipynb +228 -0
  4. noshot/data/ML TS XAI/ML/Main/2. KNN (Balance Scale Dataset).ipynb +117 -0
  5. noshot/data/ML TS XAI/ML/Main/2. KNN (Iris Dataset).ipynb +165 -0
  6. noshot/data/ML TS XAI/ML/Main/2. KNN (Sobar-72 Dataset).ipynb +251 -0
  7. noshot/data/ML TS XAI/ML/Main/3. LDA (Balance Scale Dataset).ipynb +78 -0
  8. noshot/data/ML TS XAI/ML/Main/3. LDA (NPHA Doctor Visits Dataset).ipynb +114 -0
  9. noshot/data/ML TS XAI/ML/Main/4. Linear Regression (Machine Dataset).ipynb +115 -0
  10. noshot/data/ML TS XAI/ML/Main/4. Linear Regression (Real Estate Dataset).ipynb +159 -0
  11. noshot/data/ML TS XAI/ML/Main/5. Logistic Regression (Magic04 Dataset).ipynb +200 -0
  12. noshot/data/ML TS XAI/ML/Main/5. Logistic Regression (Wine Dataset).ipynb +112 -0
  13. noshot/data/ML TS XAI/ML/Main/6. Naive Bayes Classifier (Agaricus Lepiota Dataset).ipynb +153 -0
  14. noshot/data/ML TS XAI/ML/Main/6. Naive Bayes Classifier (Wine Dataset).ipynb +89 -0
  15. noshot/data/ML TS XAI/ML/Main/7. SVM (Rice Dataset).ipynb +208 -0
  16. noshot/data/ML TS XAI/ML/Main/8. FeedForward NN (Sobar72 Dataset).ipynb +260 -0
  17. noshot/data/ML TS XAI/ML/Main/9. CNN (Cifar10 Dataset).ipynb +238 -0
  18. noshot/data/ML TS XAI/ML/Main/data/agaricus-lepiota.data +8124 -0
  19. noshot/data/ML TS XAI/ML/Main/data/balance-scale.txt +625 -0
  20. noshot/data/ML TS XAI/ML/Main/data/doctor-visits.csv +715 -0
  21. noshot/data/ML TS XAI/ML/Main/data/iris.csv +151 -0
  22. noshot/data/ML TS XAI/ML/Main/data/machine-data.csv +210 -0
  23. noshot/data/ML TS XAI/ML/Main/data/magic04.data +19020 -0
  24. noshot/data/ML TS XAI/ML/Main/data/real-estate.xlsx +0 -0
  25. noshot/data/ML TS XAI/ML/Main/data/rice.arff +3826 -0
  26. noshot/data/ML TS XAI/ML/Main/data/sobar-72.csv +73 -0
  27. noshot/data/ML TS XAI/ML/Main/data/wine-dataset.csv +179 -0
  28. noshot/data/ML TS XAI/ML/Other Codes.ipynb +158 -0
  29. noshot/data/ML TS XAI/ML/Rolls Royce AllinOne.ipynb +691 -0
  30. {noshot-2.0.0.dist-info → noshot-3.0.0.dist-info}/METADATA +1 -1
  31. noshot-3.0.0.dist-info/RECORD +38 -0
  32. {noshot-2.0.0.dist-info → noshot-3.0.0.dist-info}/WHEEL +1 -1
  33. noshot/data/ML TS XAI/TS/bill-charge.ipynb +0 -239
  34. noshot/data/ML TS XAI/TS/daily-min-temperatures.ipynb +0 -239
  35. noshot/data/ML TS XAI/TS/data/bill-data.csv +0 -21
  36. noshot/data/ML TS XAI/TS/data/daily-min-temperatures.csv +0 -3651
  37. noshot/data/ML TS XAI/TS/data/monthly-sunspots.csv +0 -2821
  38. noshot/data/ML TS XAI/TS/monthly-sunspots.ipynb +0 -241
  39. noshot-2.0.0.dist-info/RECORD +0 -15
  40. {noshot-2.0.0.dist-info → noshot-3.0.0.dist-info}/licenses/LICENSE.txt +0 -0
  41. {noshot-2.0.0.dist-info → noshot-3.0.0.dist-info}/top_level.txt +0 -0
noshot/data/ML TS XAI/ML/Main/1. EDA-PCA (Balance Scale Dataset).ipynb
@@ -0,0 +1,139 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1919dce4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "import seaborn as sns\n",
+ "import matplotlib.pyplot as plt\n",
+ "from sklearn.preprocessing import StandardScaler\n",
+ "from sklearn.decomposition import PCA\n",
+ "\n",
+ "import warnings\n",
+ "warnings.filterwarnings('ignore')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "459c19c9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cols = ['class name', 'left-weight', 'left-distance', 'right-weight', 'right-distance']\n",
+ "df = pd.read_table('data/balance-scale.txt', delimiter=',', names=cols)\n",
+ "print(\"Shape:\", df.shape)\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ceb17e01",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c3950e04",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.info()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2fd3e589-3f8f-4203-aa0b-7d3261a3c5b1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sns.countplot(df, x='class name', hue='class name')\n",
+ "plt.title(\"Count Plot ['B', 'R', 'L']\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b9d4bb7e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "features = ['left-weight', 'left-distance', 'right-weight', 'right-distance']\n",
+ "x = df.loc[:, features]\n",
+ "y = df.loc[:, 'class name']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "de2b55cc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = StandardScaler().fit_transform(x)\n",
+ "pca = PCA(n_components=2)\n",
+ "pct = pca.fit_transform(x)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "08a8f4c9-05d6-4422-a6f3-5c6f7605220b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "principal_df = pd.DataFrame(pct, columns=['pc1', 'pc2'])\n",
+ "principal_df['class name'] = df['class name']\n",
+ "principal_df.head()\n",
+ "\n",
+ "fig = plt.figure(figsize=(8,8))\n",
+ "ax = fig.add_subplot(1, 1, 1)\n",
+ "\n",
+ "targets = ['L','B','R']\n",
+ "colors = ['r', 'g','b']\n",
+ "for target, color in zip(targets, colors):\n",
+ " selected = principal_df[principal_df['class name'] == target]\n",
+ " ax.scatter(selected['pc1'], selected['pc2'], c=color, s=50)\n",
+ "\n",
+ "ax.set_xlabel('Principal Component 1')\n",
+ "ax.set_ylabel('Principal Component 2')\n",
+ "ax.set_title('2 component PCA')\n",
+ "ax.legend(targets)\n",
+ "ax.grid()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
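
Note: the notebook above standardizes the four balance-scale features and projects them onto two principal components with scikit-learn. A minimal, self-contained sketch of that same pipeline follows, with random integers standing in for the bundled balance-scale.txt so it runs on its own; it also prints explained_variance_ratio_, which the notebook itself does not report.

    import numpy as np
    from sklearn.preprocessing import StandardScaler
    from sklearn.decomposition import PCA

    # Stand-in data: 100 samples, 4 features (the notebook loads data/balance-scale.txt instead).
    rng = np.random.default_rng(0)
    X = rng.integers(1, 6, size=(100, 4)).astype(float)

    # Standardize, then project onto the top two principal components.
    X_scaled = StandardScaler().fit_transform(X)
    pca = PCA(n_components=2)
    X_2d = pca.fit_transform(X_scaled)

    print("Projected shape:", X_2d.shape)
    print("Variance explained by PC1, PC2:", pca.explained_variance_ratio_)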
noshot/data/ML TS XAI/ML/Main/1. EDA-PCA (Rice Dataset).ipynb
@@ -0,0 +1,181 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d27dd480-bff5-4179-8b5d-145b15fe2527",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import seaborn as sns\n",
+ "import scipy\n",
+ "import matplotlib.pyplot as plt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c34c5236-de26-47a7-b047-c084519f6ef7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data, _ = scipy.io.arff.loadarff('data/rice.arff')\n",
+ "df = pd.DataFrame(data)\n",
+ "print(\"Shape:\", df.shape)\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9ef13fe2-6f7f-4e9e-97bf-f44c0b10694e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0a51ae34-5fa4-4f78-a08c-de89bcc1f6b2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.info()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3a30c508-273f-46cd-8ecd-8361b255488b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.isnull().sum()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f0651a71-0bc1-489e-ad5f-7b080a9cb978",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sns.countplot(df, x='Class', hue='Class')\n",
+ "plt.title(\"Class Distribution\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e2d1c93f-e097-4b08-b9ff-48c7d5fd9659",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"Mean of Features\")\n",
+ "df.iloc[:, :-1].mean()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0977bb62-192e-43f4-8cd9-e054ea3526b9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cov = df.iloc[:, :-1].cov().round(3)\n",
+ "print(\"Covariance Matrix\")\n",
+ "cov"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e2862d9a-76b9-4367-a135-0fc36ae60478",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "eigen_vals, eigen_vecs = np.linalg.eig(cov)\n",
+ "mapping = {round(eigen_vals[i], 2): eigen_vecs[:, i].round(2) \n",
+ " for i in range(len(eigen_vals))}\n",
+ "print(\"Eigen Value-Vector Pairs:\")\n",
+ "mapping"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "06a3c671-6cae-44c7-9960-56702042c0e5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "n = 2\n",
+ "sorted_eigen_vals = sorted(mapping.keys(), reverse=True)\n",
+ "top_eigen_vals = sorted_eigen_vals[:n]\n",
+ "top_eigen_vals"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6473c348-871e-428d-a3d6-7942fc0df706",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "projection_matrix = np.array([mapping[val] for val in top_eigen_vals]).T\n",
+ "projection_matrix"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6014c705-cb9a-470f-905f-ec05eb5f89f9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "X = df.iloc[:, :-1].values\n",
+ "reduced_data = X.dot(projection_matrix)\n",
+ "reduced_df = pd.DataFrame(reduced_data, columns=[f'PC{i+1}' for i in range(n)])\n",
+ "reduced_df['Class'] = df['Class'].values\n",
+ "reduced_df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f53faa36-3044-4a48-9512-f7bc947bd9ba",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sns.scatterplot(data=reduced_df, x='PC1', y='PC2', hue='Class')\n",
+ "plt.title('Principle Component Analysis')\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
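
Note: this Rice notebook performs PCA by hand: covariance matrix, eigen decomposition, then projection onto the top-n eigenvectors, with the eigenpairs stored in a dictionary keyed by rounded eigenvalues (keys can collide if two eigenvalues round to the same value). A common alternative is to order the eigenpairs with argsort; the sketch below shows that variant on synthetic data and is illustrative, not the package's code.

    import numpy as np

    # Stand-in for the rice features: 200 samples, 5 numeric columns.
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 5))
    X_centered = X - X.mean(axis=0)

    # Covariance and eigen decomposition (eigh: symmetric input, real eigenvalues).
    cov = np.cov(X_centered, rowvar=False)
    eigen_vals, eigen_vecs = np.linalg.eigh(cov)

    # Order eigenpairs by decreasing eigenvalue and keep the top n as the projection matrix.
    n = 2
    order = np.argsort(eigen_vals)[::-1][:n]
    projection_matrix = eigen_vecs[:, order]        # shape (5, n)

    reduced = X_centered @ projection_matrix        # shape (200, n)
    print("Reduced shape:", reduced.shape, "top eigenvalues:", eigen_vals[order])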
noshot/data/ML TS XAI/ML/Main/10. HMM Veterbi.ipynb
@@ -0,0 +1,228 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "GrBOlID9_Jiz",
+ "outputId": "f547fe18-4b89-4b41-928d-4fcc75fe5916"
+ },
+ "outputs": [],
+ "source": [
+ "transition = {\"AtoA\":0.7,\"AtoB\":0.3,\"BtoA\":0.5,\"BtoB\":0.5}\n",
+ "emission = {\"A\":{\"S1\":0.6,\"S2\":0.1,\"S3\":0.3},\"B\":{\"S1\":0.1,\"S2\":0.7,\"S3\":0.2}}\n",
+ "pi=(1,0)\n",
+ "\n",
+ "a,b=pi\n",
+ "alpha1=[a]\n",
+ "alpha2=[b]\n",
+ "for i in [\"S3\",\"S2\",\"S1\"]:\n",
+ " temp1=[]\n",
+ " temp2=[]\n",
+ " for j in [\"AtoA\",\"AtoB\",\"BtoA\",\"BtoB\"]:\n",
+ " if j.endswith(\"A\"):\n",
+ " if j.startswith(\"A\"): temp1.append(a*transition[j]*emission[\"A\"][i])\n",
+ " else: temp1.append(b*transition[j]*emission[\"B\"][i])\n",
+ " else:\n",
+ " if j.startswith(\"A\"): temp2.append(a*transition[j]*emission[\"A\"][i])\n",
+ " else: temp2.append(b*transition[j]*emission[\"B\"][i])\n",
+ " a=sum(temp1)\n",
+ " b=sum(temp2)\n",
+ " alpha1.append(a)\n",
+ " alpha2.append(b)\n",
+ " temp1.clear()\n",
+ " temp2.clear()\n",
+ " if i==\"S1\": print(\"Using Forward Chaining:\",round(a+b,4))\n",
+ "print(alpha1,\"\\n\",alpha2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Ywqsib8v_zM7",
+ "outputId": "0b4359a0-276d-4d41-beb7-a0e5c9d8fe41"
+ },
+ "outputs": [],
+ "source": [
+ "transition={\"AtoA\":0.7,\"AtoB\":0.3,\"BtoA\":0.5,\"BtoB\":0.5}\n",
+ "emission={\"A\":{\"S1\":0.6,\"S2\":0.1,\"S3\":0.3},\"B\":{\"S1\":0.1,\"S2\":0.7,\"S3\":0.2}}\n",
+ "pi=(1,1)\n",
+ "\n",
+ "beta1=[]\n",
+ "beta2=[]\n",
+ "a,b=pi\n",
+ "for i in reversed([\"S3\",\"S2\",\"S1\"]):\n",
+ " temp1=[]\n",
+ " temp2=[]\n",
+ " for j in [\"AtoA\",\"AtoB\",\"BtoA\",\"BtoB\"]:\n",
+ " if j.startswith(\"A\"):\n",
+ " if j.endswith(\"A\"): temp1.append(a*transition[j]*emission[\"A\"][i])\n",
+ " else: temp1.append(b*transition[j]*emission[\"A\"][i])\n",
+ " else:\n",
+ " if j.endswith(\"A\"): temp2.append(a*transition[j]*emission[\"B\"][i])\n",
+ " else: temp2.append(b*transition[j]*emission[\"B\"][i])\n",
+ " a=sum(temp1)\n",
+ " b=sum(temp2)\n",
+ " beta1.append(a)\n",
+ " beta2.append(b)\n",
+ " temp1.clear()\n",
+ " temp2.clear()\n",
+ " if i==\"S3\": print(\"Using Backward Chaining:\",round(a,4))\n",
+ "print(beta1,\"\\n\",beta2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "waKP0k73_1yn",
+ "outputId": "1427045a-2118-4a11-938c-f5e54e63fe80"
+ },
+ "outputs": [],
+ "source": [
+ "transition={\"AtoA\":0.7,\"AtoB\":0.3,\"BtoA\":0.5,\"BtoB\":0.5}\n",
+ "emission={\"A\":{\"S1\":0.6,\"S2\":0.1,\"S3\":0.3},\"B\":{\"S1\":0.1,\"S2\":0.7,\"S3\":0.2}}\n",
+ "pi=(1,0)\n",
+ "\n",
+ "a,b=pi\n",
+ "delta1=[a]\n",
+ "delta2=[b]\n",
+ "chi=[]\n",
+ "for i in [\"S3\",\"S2\",\"S1\"]:\n",
+ " temp1=[]\n",
+ " temp2=[]\n",
+ " temp3={}\n",
+ " for j in [\"AtoA\",\"AtoB\",\"BtoA\",\"BtoB\"]:\n",
+ " if j.endswith(\"A\"):\n",
+ " if j.startswith(\"A\"): temp1.append(a*transition[j]*emission[\"A\"][i])\n",
+ " else: temp1.append(b*transition[j]*emission[\"B\"][i])\n",
+ " else:\n",
+ " if j.startswith(\"A\"): temp2.append(a*transition[j]*emission[\"A\"][i])\n",
+ " else: temp2.append(b*transition[j]*emission[\"B\"][i])\n",
+ " a=max(temp1)\n",
+ " b=max(temp2)\n",
+ " temp3[a]=\"A\"\n",
+ " temp3[b]=\"B\"\n",
+ " delta1.append(a)\n",
+ " delta2.append(b)\n",
+ " chi.append(temp3[max(temp3.keys())])\n",
+ " temp1.clear()\n",
+ " temp2.clear()\n",
+ " if i==\"S1\":\n",
+ " print(\"Using Veterbi Algorithm:\",round(max(a,b),4))\n",
+ " print(\"Best Sequence:\")\n",
+ " for i in chi: print(i,end=\"->\")\n",
+ " print(temp3[max(temp3.keys())])\n",
+ "print(delta1)\n",
+ "print(delta2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 36
+ },
+ "id": "pU0-CxGx_4Gz",
+ "outputId": "97bbaadc-83d8-4b86-89e7-f67e347adfb0"
+ },
+ "outputs": [],
+ "source": [
+ "from graphviz import Digraph\n",
+ "\n",
+ "states = ['A', 'B']\n",
+ "observations = ['S1', 'S2', 'S3']\n",
+ "\n",
+ "dot = Digraph(comment='HMM')\n",
+ "\n",
+ "for state in states:\n",
+ " dot.node(state, state)\n",
+ "\n",
+ "for from_state in states:\n",
+ " for to_state in states:\n",
+ " transition_prob = transition[from_state + 'to' + to_state]\n",
+ " dot.edge(from_state, to_state, label=str(transition_prob))\n",
+ "\n",
+ "for state in states:\n",
+ " for observation in observations:\n",
+ " emission_prob = emission[state][observation]\n",
+ " dot.edge(state, observation, label=str(emission_prob), style='dashed')\n",
+ "\n",
+ "dot.render('hmm', view=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 513
+ },
+ "id": "qBU_NokB_679",
+ "outputId": "d3b086cb-92d6-4941-de26-137888b1a2d1"
+ },
+ "outputs": [],
+ "source": [
+ "import networkx as nx\n",
+ "import matplotlib.pyplot as plt\n",
+ "graph = nx.DiGraph()\n",
+ "\n",
+ "graph.add_nodes_from(states)\n",
+ "for from_state in states:\n",
+ " for to_state in states:\n",
+ " transition_prob = transition[from_state + 'to' + to_state]\n",
+ " graph.add_edge(from_state, to_state, weight=transition_prob, label=str(transition_prob))\n",
+ "\n",
+ "for state in states:\n",
+ " for observation in observations:\n",
+ " emission_prob = emission[state][observation]\n",
+ " graph.add_edge(state, observation, weight=emission_prob, label=str(emission_prob), style='dashed')\n",
+ "pos = nx.spring_layout(graph)\n",
+ "edge_labels = nx.get_edge_attributes(graph, 'label')\n",
+ "\n",
+ "nx.draw(graph, pos, with_labels=True, node_size=1500, node_color=\"skyblue\", font_size=12, font_weight='bold')\n",
+ "nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_labels, font_size=10)\n",
+ "\n",
+ "plt.title(\"Hidden Markov Model\")\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
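
Note: the HMM notebook hand-rolls forward, backward, and Viterbi passes over a two-state model (states A and B, symbols S1-S3), applying a transition before each emission and starting from pi without emitting, so its printed numbers follow that convention. For comparison, below is a compact NumPy sketch of the textbook forward and Viterbi recursions over the same transition and emission tables; the observation sequence here is an illustrative assumption, not something the package defines.

    import numpy as np

    # Same model parameters as the notebook above.
    A = np.array([[0.7, 0.3],            # transitions: rows = from {A, B}, cols = to {A, B}
                  [0.5, 0.5]])
    B = np.array([[0.6, 0.1, 0.3],       # emissions: rows = {A, B}, cols = {S1, S2, S3}
                  [0.1, 0.7, 0.2]])
    pi = np.array([1.0, 0.0])

    obs = [2, 1, 0]                      # assumed example sequence S3, S2, S1 (column indices)

    # Forward algorithm: alpha[i] = P(o_1..o_t, state_t = i), updated in place over t.
    alpha = pi * B[:, obs[0]]
    for o in obs[1:]:
        alpha = (alpha @ A) * B[:, o]
    print("P(observations) =", alpha.sum())

    # Viterbi: most likely state sequence via max-product recursion plus backpointers.
    delta = pi * B[:, obs[0]]
    back = []
    for o in obs[1:]:
        scores = delta[:, None] * A          # scores[i, j] = delta[i] * A[i, j]
        back.append(scores.argmax(axis=0))   # best predecessor for each state j
        delta = scores.max(axis=0) * B[:, o]
    path = [int(delta.argmax())]
    for ptr in reversed(back):
        path.append(int(ptr[path[-1]]))
    path.reverse()
    print("Best path:", ["AB"[s] for s in path], "probability:", delta.max())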
noshot/data/ML TS XAI/ML/Main/2. KNN (Balance Scale Dataset).ipynb
@@ -0,0 +1,117 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b4a8b5dc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import sklearn\n",
+ "from sklearn.neighbors import KNeighborsClassifier\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn import metrics\n",
+ "from sklearn.preprocessing import StandardScaler"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1c308767",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cols = ['class name','left-weight','left-distance','right-weight','right-distance']\n",
+ "df = pd.read_csv('data/balance-scale.txt', delimiter=',', names=cols)\n",
+ "print(\"Shape:\", df.shape)\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c5dc6788-f131-4b3e-8b39-0f83c117e2cd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "features = ['left-weight', 'left-distance', 'right-weight', 'right-distance']\n",
+ "x = df.loc[:, features]\n",
+ "y = df.loc[:, 'class name']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "59450a6f-db40-4b8c-b294-d427f0792c26",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = StandardScaler().fit_transform(x)\n",
+ "X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.4, \n",
+ " random_state=4)\n",
+ "print (X_train.shape)\n",
+ "print (X_test.shape)\n",
+ "knn = KNeighborsClassifier(n_neighbors=15)\n",
+ "knn.fit(X_train, y_train) "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "22e96c2a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "y_pred = knn.predict(np.array([1,1,1,1]).reshape(1, -1))[0]\n",
+ "print(\"Class Predicted [1,1,1,1]:\", y_pred)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "366c003d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "y_pred = knn.predict(X_test)\n",
+ "print(\"Accuracy:\", metrics.accuracy_score(y_test, y_pred))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "517c6e56-afc6-481c-a48c-b1a0435537bd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cm = metrics.confusion_matrix(y_test, knn.predict(X_test))\n",
+ "metrics.ConfusionMatrixDisplay(cm, display_labels=['B','L','R']).plot()\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
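
Note: the KNN notebook scales the features, splits 60/40, and fixes n_neighbors=15. When adapting it, a quick way to pick k is cross-validation; the sketch below shows that with scikit-learn's cross_val_score on stand-in data (random features and labels are assumptions so the snippet runs on its own; it is not part of the package).

    import numpy as np
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.model_selection import cross_val_score
    from sklearn.preprocessing import StandardScaler

    # Stand-in for the scaled balance-scale features and their three class labels.
    rng = np.random.default_rng(0)
    X = StandardScaler().fit_transform(rng.integers(1, 6, size=(300, 4)).astype(float))
    y = rng.choice(["L", "B", "R"], size=300)

    # Score odd k values with 5-fold cross-validation and keep the best one.
    scores = {k: cross_val_score(KNeighborsClassifier(n_neighbors=k), X, y, cv=5).mean()
              for k in range(1, 31, 2)}
    best_k = max(scores, key=scores.get)
    print("Best k:", best_k, "CV accuracy:", round(scores[best_k], 3))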