ml-toolkit-tech 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ml_toolkit_tech-0.1.0/PKG-INFO +9 -0
- ml_toolkit_tech-0.1.0/README.md +29 -0
- ml_toolkit_tech-0.1.0/ml_toolkit_tech/__init__.py +7 -0
- ml_toolkit_tech-0.1.0/ml_toolkit_tech/_injector.py +27 -0
- ml_toolkit_tech-0.1.0/ml_toolkit_tech/practicals.json +39 -0
- ml_toolkit_tech-0.1.0/ml_toolkit_tech.egg-info/PKG-INFO +9 -0
- ml_toolkit_tech-0.1.0/ml_toolkit_tech.egg-info/SOURCES.txt +10 -0
- ml_toolkit_tech-0.1.0/ml_toolkit_tech.egg-info/dependency_links.txt +1 -0
- ml_toolkit_tech-0.1.0/ml_toolkit_tech.egg-info/top_level.txt +1 -0
- ml_toolkit_tech-0.1.0/pyproject.toml +3 -0
- ml_toolkit_tech-0.1.0/setup.cfg +4 -0
- ml_toolkit_tech-0.1.0/setup.py +12 -0
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# ml_toolkit_tech
|
|
2
|
+
|
|
3
|
+
Inject ML practical code directly into the next Google Colab cell.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install ml_toolkit_tech
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage in Google Colab
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
from ml_toolkit_tech import tech1 # injects Practical 1 code into next cell
|
|
15
|
+
from ml_toolkit_tech import tech2 # injects Practical 2 code into next cell
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
Each `techN` call writes the corresponding practical's code into the next cell automatically.
|
|
19
|
+
|
|
20
|
+
## Practicals
|
|
21
|
+
|
|
22
|
+
| Import | Title |
|
|
23
|
+
|--------|-------|
|
|
24
|
+
| `tech1` | TensorFlow Matrix Operations |
|
|
25
|
+
| `tech2` | XOR Neural Network with Keras |
|
|
26
|
+
| `tech3` | Binary Classification with Keras and Sklearn |
|
|
27
|
+
| `tech4` | Autoencoder on MNIST Dataset |
|
|
28
|
+
| `tech5` | CNN on MNIST Dataset |
|
|
29
|
+
| `tech6` | Convolutional Denoising Autoencoder on MNIST |
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
|
|
4
|
+
def _load_practicals():
    """Load ``practicals.json`` and index the entries by their integer ``id``.

    Returns:
        dict[int, dict]: mapping of practical id -> practical record
        (each record has the keys ``id``, ``title`` and ``code``).
    """
    # The data file ships alongside this module (declared via package_data).
    path = os.path.join(os.path.dirname(__file__), "practicals.json")
    # Explicit UTF-8: the stored code snippets contain non-ASCII characters
    # (e.g. "→"), so relying on the platform default encoding can raise
    # UnicodeDecodeError on systems where it is not UTF-8.
    with open(path, "r", encoding="utf-8") as f:
        return {p["id"]: p for p in json.load(f)["practicals"]}
|
8
|
+
|
|
9
|
+
def inject(practical_id: int):
    """Inject the code for *practical_id* into the next Colab cell.

    Outside Google Colab (or when no interactive IPython shell is
    available) the practical's code is printed to stdout instead.

    Args:
        practical_id: The ``id`` field of an entry in ``practicals.json``.

    Raises:
        ValueError: If no practical with that id exists.
    """
    practicals = _load_practicals()
    if practical_id not in practicals:
        raise ValueError(f"Practical {practical_id} not found.")

    p = practicals[practical_id]
    code = p["code"]

    try:
        from google.colab import _message  # noqa -- import is a Colab presence check only
        import IPython
        shell = IPython.get_ipython()
        if shell is None:
            # google.colab is importable but we are not inside an interactive
            # shell; route to the print fallback instead of crashing with
            # AttributeError on shell.set_next_input below.
            raise ImportError("no interactive IPython shell available")
        shell.set_next_input(code, replace=False)
        print(f"✓ Practical {practical_id}: '{p['title']}' injected into next cell.")
    except ImportError:
        # Fallback: just print the code if not in Colab
        print(f"# Practical {practical_id}: {p['title']}\n")
        print(code)
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
{
|
|
2
|
+
"practicals": [
|
|
3
|
+
{
|
|
4
|
+
"id": 1,
|
|
5
|
+
"title": "TensorFlow Matrix Operations",
|
|
6
|
+
"code": "# Practical no 1: Performing matrix multiplication and finding eigenvalues and eigenvectors using TensorFlow\n\nimport tensorflow as tf\n\n# Matrix Multiplication\nA = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)\nB = tf.constant([[5., 6.], [7., 8.]], dtype=tf.float32)\nC = tf.matmul(A, B)\nprint(\"Matrix Multiplication:\\n\", C.numpy())\n\n# Symmetric Eigenvalues and Eigenvectors\nS = tf.constant([[2., -1.], [-1., 2.]], dtype=tf.float32)\neigenvalues, eigenvectors = tf.linalg.eigh(S)\nprint(\"Eigenvalues:\\n\", eigenvalues.numpy())\nprint(\"Eigenvectors:\\n\", eigenvectors.numpy())\n\n# General Eigenvalues and Eigenvectors\neigenvalues_gen, eigenvectors_gen = tf.linalg.eig(S)\nprint(\"General Eigenvalues:\\n\", eigenvalues_gen.numpy())\nprint(\"General Eigenvectors:\\n\", eigenvectors_gen.numpy())"
|
|
7
|
+
}
|
|
8
|
+
,
|
|
9
|
+
{
|
|
10
|
+
"id": 2,
|
|
11
|
+
"title": "XOR Neural Network with Keras",
|
|
12
|
+
"code": "# Practical no 2: Solving XOR problem using deep forward network\n\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import SGD\n\nX = np.array([[0,0], [0,1], [1,0], [1,1]])\ny = np.array([[0], [1], [1], [0]])\n\nmodel = Sequential([\n Dense(8, activation='relu', input_shape=(2,)),\n Dense(4, activation='relu'),\n Dense(1, activation='sigmoid')\n])\n\nmodel.compile(loss='binary_crossentropy', optimizer=SGD(0.1))\nmodel.fit(X, y, epochs=1000, verbose=0)\n\npredictions = model.predict(X)\nfor i, pred in enumerate(predictions):\n print(f\"Input: {X[i]} → Predicted: {round(pred[0])}, Actual: {y[i][0]}\")"
|
|
13
|
+
}
|
|
14
|
+
,
|
|
15
|
+
{
|
|
16
|
+
"id": 3,
|
|
17
|
+
"title": "Binary Classification with Keras and Sklearn",
|
|
18
|
+
"code": "# Practical no 3: Implementing a deep neural network for performing binary classification task\n\nimport numpy as np\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Adam\n\nX, y = make_classification(n_samples=1000, n_features=20,\n n_informative=15, n_redundant=5,\n random_state=42)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\nmodel = Sequential([\n Dense(64, activation='relu', input_shape=(20,)),\n Dense(32, activation='relu'),\n Dense(16, activation='relu'),\n Dense(1, activation='sigmoid')\n])\n\nmodel.compile(optimizer=Adam(0.001), loss='binary_crossentropy', metrics=['accuracy'])\nmodel.fit(X_train, y_train, epochs=20, batch_size=32)\n\nloss, acc = model.evaluate(X_test, y_test)\nprint(\"Accuracy:\", acc)\n\ny_pred = (model.predict(X_test) > 0.5).astype(int)\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))"
|
|
19
|
+
}
|
|
20
|
+
,
|
|
21
|
+
{
|
|
22
|
+
"id": 4,
|
|
23
|
+
"title": "Autoencoder on MNIST Dataset",
|
|
24
|
+
"code": "# Practical no 4: Performing encoding and decoding of images using deep autoencoders\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.optimizers import Adam\n\n(x_train, _), (x_test, _) = mnist.load_data()\nx_train = x_train.astype(\"float32\") / 255.0\nx_test = x_test.astype(\"float32\") / 255.0\nx_train = x_train.reshape(len(x_train), -1)\nx_test = x_test.reshape(len(x_test), -1)\n\ninput_dim = x_train.shape[1]\nencoding_dim = 64\n\ninput_img = Input(shape=(input_dim,))\nencoded = Dense(512, activation=\"relu\")(input_img)\nencoded = Dense(256, activation=\"relu\")(encoded)\nencoded = Dense(128, activation=\"relu\")(encoded)\nencoded = Dense(encoding_dim, activation=\"relu\")(encoded)\n\ndecoded = Dense(128, activation=\"relu\")(encoded)\ndecoded = Dense(256, activation=\"relu\")(decoded)\ndecoded = Dense(512, activation=\"relu\")(decoded)\ndecoded = Dense(input_dim, activation=\"sigmoid\")(decoded)\n\nautoencoder = Model(input_img, decoded)\nencoder = Model(input_img, encoded)\n\nautoencoder.compile(optimizer=Adam(), loss=\"binary_crossentropy\")\nautoencoder.fit(x_train, x_train,\n epochs=20,\n batch_size=256,\n shuffle=True,\n validation_data=(x_test, x_test))\n\nencoded_imgs = encoder.predict(x_test)\ndecoded_imgs = autoencoder.predict(x_test)\n\nn = 10\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n plt.subplot(3, n, i + 1)\n plt.imshow(x_test[i].reshape(28, 28), cmap=\"gray\")\n plt.axis(\"off\")\n plt.subplot(3, n, i + 1 + n)\n plt.imshow(encoded_imgs[i].reshape(8, 8), cmap=\"viridis\")\n plt.axis(\"off\")\n plt.subplot(3, n, i + 1 + 2*n)\n plt.imshow(decoded_imgs[i].reshape(28, 28), cmap=\"gray\")\n plt.axis(\"off\")\nplt.show()"
|
|
25
|
+
}
|
|
26
|
+
,
|
|
27
|
+
{
|
|
28
|
+
"id": 5,
|
|
29
|
+
"title": "CNN on MNIST Dataset",
|
|
30
|
+
"code": "# Practical no 5: Implementation of CNN to predict numbers from images\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras import datasets, layers, models\n\n(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()\nx_train = x_train.astype(\"float32\") / 255.0\nx_test = x_test.astype(\"float32\") / 255.0\nx_train = np.expand_dims(x_train, -1)\nx_test = np.expand_dims(x_test, -1)\n\nmodel = models.Sequential([\n layers.Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)),\n layers.MaxPooling2D((2,2)),\n layers.Conv2D(64, (3,3), activation='relu'),\n layers.MaxPooling2D((2,2)),\n layers.Conv2D(64, (3,3), activation='relu'),\n layers.Flatten(),\n layers.Dense(64, activation='relu'),\n layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nhistory = model.fit(x_train, y_train, epochs=5,\n validation_data=(x_test, y_test))\n\ntest_loss, test_acc = model.evaluate(x_test, y_test)\nprint(test_acc)\n\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.show()\n\npredictions = model.predict(x_test[:5])\nprint(np.argmax(predictions, axis=1))\nprint(y_test[:5])\n\nfor i in range(5):\n plt.imshow(x_test[i].reshape(28,28), cmap=\"gray\")\n plt.title(f\"{y_test[i]} / {np.argmax(predictions[i])}\")\n plt.show()"
|
|
31
|
+
}
|
|
32
|
+
,
|
|
33
|
+
{
|
|
34
|
+
"id": 6,
|
|
35
|
+
"title": "Convolutional Denoising Autoencoder on MNIST",
|
|
36
|
+
"code": "# Practical no 6: Denoising of images using Autoencoders\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D\nfrom tensorflow.keras.optimizers import Adam\n\n(x_train, _), (x_test, _) = mnist.load_data()\nx_train = x_train.astype(\"float32\") / 255.\nx_test = x_test.astype(\"float32\") / 255.\nx_train = np.reshape(x_train, (len(x_train), 28, 28, 1))\nx_test = np.reshape(x_test, (len(x_test), 28, 28, 1))\n\nnoise_factor = 0.5\nx_train_noisy = x_train + noise_factor * np.random.normal(size=x_train.shape)\nx_test_noisy = x_test + noise_factor * np.random.normal(size=x_test.shape)\nx_train_noisy = np.clip(x_train_noisy, 0., 1.)\nx_test_noisy = np.clip(x_test_noisy, 0., 1.)\n\ninput_img = Input(shape=(28, 28, 1))\nx = Conv2D(32, (3,3), activation=\"relu\", padding=\"same\")(input_img)\nx = MaxPooling2D((2,2), padding=\"same\")(x)\nx = Conv2D(32, (3,3), activation=\"relu\", padding=\"same\")(x)\nencoded = MaxPooling2D((2,2), padding=\"same\")(x)\n\nx = Conv2D(32, (3,3), activation=\"relu\", padding=\"same\")(encoded)\nx = UpSampling2D((2,2))(x)\nx = Conv2D(32, (3,3), activation=\"relu\", padding=\"same\")(x)\nx = UpSampling2D((2,2))(x)\ndecoded = Conv2D(1, (3,3), activation=\"sigmoid\", padding=\"same\")(x)\n\nautoencoder = Model(input_img, decoded)\nautoencoder.compile(optimizer=Adam(), loss=\"binary_crossentropy\")\nautoencoder.fit(x_train_noisy, x_train,\n                epochs=10,\n                batch_size=128,\n                shuffle=True,\n                validation_data=(x_test_noisy, x_test))\n\ndecoded_imgs = autoencoder.predict(x_test_noisy)\n\nn = 10\nplt.figure(figsize=(20,6))\nfor i in range(n):\n    plt.subplot(3, n, i+1)\n    plt.imshow(x_test_noisy[i].reshape(28,28), cmap=\"gray\")\n    plt.axis(\"off\")\n    plt.subplot(3, n, i+n+1)\n    plt.imshow(x_test[i].reshape(28,28), cmap=\"gray\")\n    plt.axis(\"off\")\n    plt.subplot(3, n, i+2*n+1)\n    plt.imshow(decoded_imgs[i].reshape(28,28), cmap=\"gray\")\n    plt.axis(\"off\")\nplt.show()"
|
|
37
|
+
}
|
|
38
|
+
]
|
|
39
|
+
}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
setup.py
|
|
4
|
+
ml_toolkit_tech/__init__.py
|
|
5
|
+
ml_toolkit_tech/_injector.py
|
|
6
|
+
ml_toolkit_tech/practicals.json
|
|
7
|
+
ml_toolkit_tech.egg-info/PKG-INFO
|
|
8
|
+
ml_toolkit_tech.egg-info/SOURCES.txt
|
|
9
|
+
ml_toolkit_tech.egg-info/dependency_links.txt
|
|
10
|
+
ml_toolkit_tech.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
ml_toolkit_tech
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
from setuptools import setup, find_packages

# Packaging metadata for the ml_toolkit_tech distribution.
setup(
    # Distribution name on the registry; the import name is the same.
    name="ml_toolkit_tech",
    version="0.1.0",
    packages=find_packages(),
    # Ship the JSON data file inside the installed package; without this,
    # _load_practicals() cannot find practicals.json at runtime.
    package_data={"ml_toolkit_tech": ["practicals.json"]},
    # No runtime dependencies: IPython/google.colab are imported lazily and
    # only inside Colab, which already provides them.
    install_requires=[],
    # NOTE(review): placeholder author metadata -- fill in before release.
    author="Your Name",
    author_email="your@email.com",
    python_requires=">=3.7",
)
|