optiviz 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Ronit Kunkolienker
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
optiviz-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,82 @@
1
+ Metadata-Version: 2.4
2
+ Name: optiviz
3
+ Version: 0.1.0
4
+ Summary: Educational tool for visualizing PyTorch-compatible optimisers on any differentiable 1D or 2D function.
5
+ Author-email: Ronit Kunkolienker <ronitkunk@gmail.com>
6
+ License: MIT License
7
+
8
+ Copyright (c) 2025 Ronit Kunkolienker
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
+ SOFTWARE.
27
+ Requires-Python: >=3.8
28
+ Description-Content-Type: text/markdown
29
+ License-File: LICENSE.txt
30
+ Requires-Dist: torch
31
+ Requires-Dist: matplotlib
32
+ Requires-Dist: numpy
33
+ Dynamic: license-file
34
+
35
+ # OptiViz
36
+ OptiViz enables effortless visualisation of the optimisation sequence of *any* PyTorch optimiser on *any* differentiable function in one or two variables. OptiViz might find educational use in introductory nonlinear optimisation or deep learning classes.
37
+
38
+ ![Vanilla gradient descent minimising a convex quadratic form.](sgd.png "SGD")
39
+
40
+ # Installation
41
+ To install OptiViz, please use:
42
+ ```sh
43
+ pip install optiviz
44
+ ```
45
+
46
+ # Usage
47
+ All functionality of OptiViz is exposed through the `optiviz.optimise` function.
48
+ ```python
49
+ import torch
50
+ from optiviz import optimise
51
+ ```
52
+ Any optimisation problem has an objective function. OptiViz works with differentiable, real-valued objective functions in one or two variables.
53
+ ```math
54
+ f : \mathbb{R} \rightarrow \mathbb{R}
55
+ ```
56
+ ```math
57
+ g : \mathbb{R}^2 \rightarrow \mathbb{R}
58
+ ```
59
+ In code, every input to and output of the objective function must be a `torch.Tensor` of shape `(1,)`.
60
+ ```python
61
+ def f(x: torch.Tensor) -> torch.Tensor:
62
+ """
63
+ Example of an objective function in one variable.
64
+ """
65
+ return x ** 2
66
+ def g(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
67
+ """
68
+ Example of an objective function in two variables.
69
+ """
70
+ return x ** 2 + y ** 2 + x.sin() * y.sin()
71
+ ```
72
+ The `optiviz.optimise` function (please see docstring) is used to visualise the optimisation sequence of the objective function using a PyTorch optimiser.
73
+ ```python
74
+ arg_g_min = optimise(
75
+ g, # objective function
76
+ (12.5, 12.5), # initial values of the parameters being adjusted
77
+ plot_boundary=25,
78
+ iters=100,
79
+ optimiser=torch.optim.Adam, # PyTorch-compatible optimiser
80
+ lr=5e-1
81
+ )
82
+ ```
@@ -0,0 +1,48 @@
1
+ # OptiViz
2
+ OptiViz enables effortless visualisation of the optimisation sequence of *any* PyTorch optimiser on *any* differentiable function in one or two variables. OptiViz might find educational use in introductory nonlinear optimisation or deep learning classes.
3
+
4
+ ![Vanilla gradient descent minimising a convex quadratic form.](sgd.png "SGD")
5
+
6
+ # Installation
7
+ To install OptiViz, please use:
8
+ ```sh
9
+ pip install optiviz
10
+ ```
11
+
12
+ # Usage
13
+ All functionality of OptiViz is exposed through the `optiviz.optimise` function.
14
+ ```python
15
+ import torch
16
+ from optiviz import optimise
17
+ ```
18
+ Any optimisation problem has an objective function. OptiViz works with differentiable, real-valued objective functions in one or two variables.
19
+ ```math
20
+ f : \mathbb{R} \rightarrow \mathbb{R}
21
+ ```
22
+ ```math
23
+ g : \mathbb{R}^2 \rightarrow \mathbb{R}
24
+ ```
25
+ In code, every input to and output of the objective function must be a `torch.Tensor` of shape `(1,)`.
26
+ ```python
27
+ def f(x: torch.Tensor) -> torch.Tensor:
28
+ """
29
+ Example of an objective function in one variable.
30
+ """
31
+ return x ** 2
32
+ def g(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
33
+ """
34
+ Example of an objective function in two variables.
35
+ """
36
+ return x ** 2 + y ** 2 + x.sin() * y.sin()
37
+ ```
38
+ The `optiviz.optimise` function (please see docstring) is used to visualise the optimisation sequence of the objective function using a PyTorch optimiser.
39
+ ```python
40
+ arg_g_min = optimise(
41
+ g, # objective function
42
+ (12.5, 12.5), # initial values of the parameters being adjusted
43
+ plot_boundary=25,
44
+ iters=100,
45
+ optimiser=torch.optim.Adam, # PyTorch-compatible optimiser
46
+ lr=5e-1
47
+ )
48
+ ```
@@ -0,0 +1,3 @@
1
+ # Public package surface: the single entry point is re-exported here so users
+ # can write `from optiviz import optimise`.
+ from .optimise import optimise
+
+ # Explicitly declare the public API for `from optiviz import *`.
+ __all__ = ["optimise"]
@@ -0,0 +1,54 @@
1
+ import torch
2
+ from typing import Type
3
+ import inspect
4
+ import matplotlib.pyplot as plt
5
+
6
+ from .visualise import plot_objective_1D, plot_point_1D, plot_objective_2D, plot_point_2D
7
+
8
def optimise(fn, init_vector: tuple[float], plot_boundary: float = 25, iters: int = 1000, optimiser: Type[torch.optim.Optimizer] = torch.optim.Adam, **kwargs) -> tuple[float]:
    """
    Visualises the minimisation sequence of the given differentiable function `fn` using the given optimiser.

    Arguments:
        `fn`: The differentiable function to be minimised. Must take exactly 1 or 2 non-default arguments. Return value and each argument must be a `torch.Tensor` with shape (1,).
        `init_vector`: A tuple of the same dimension as the number of arguments of `fn`, specifying the initial values of the function parameters.
        `plot_boundary`: Length of the plot boundary in all dimensions in the parameter space.
        `iters`: Number of optimiser iterations; defaults to 1000.
        `optimiser`: Optimisation algorithm to use. Must be a `torch.optim.Optimizer` subclass (not instance); defaults to Adam (https://arxiv.org/abs/1412.6980).
        `**kwargs`: Any keyword arguments for the optimiser; e.g. lr.

    Returns:
        Depending on `fn`, a 1-tuple or 2-tuple of the estimated optimal parameters.

    Raises:
        ValueError: If `fn` does not take exactly 1 or 2 non-default arguments, or
            if `init_vector` has a different length than that argument count.
    """
    sig = inspect.signature(fn)
    # Count only parameters without defaults: defaulted parameters (e.g. fixed
    # data tensors) are treated as constants, not optimisation variables.
    # `inspect.Parameter.empty` is the public sentinel (not private `_empty`),
    # and sentinels are compared with `is`.
    input_dim = sum(p.default is inspect.Parameter.empty for p in sig.parameters.values())

    # Raise (not assert) so validation survives `python -O`.
    if input_dim not in (1, 2):
        raise ValueError(f"'fn' must take either 1 or 2 non-default arguments (received {input_dim}).")
    if input_dim != len(init_vector):
        raise ValueError(f"Number of non-default arguments of 'fn' ({input_dim}) does not match number of initial values in 'init_vector' ({len(init_vector)}).")

    # One shape-(1,) leaf tensor per parameter so the optimiser updates each independently.
    x = tuple(torch.tensor([x_i], requires_grad=True) for x_i in init_vector)

    plt.ion()  # interactive mode so the figure redraws after every iteration
    ax = None
    if input_dim == 1:
        ax = plt.figure().add_subplot()
        plot_objective_1D(ax, fn, (init_vector[0] - plot_boundary / 2, init_vector[0] + plot_boundary / 2))
    elif input_dim == 2:
        ax = plt.figure().add_subplot(projection='3d')
        plot_objective_2D(ax, fn, (init_vector[0] - plot_boundary / 2, init_vector[0] + plot_boundary / 2), (init_vector[1] - plot_boundary / 2, init_vector[1] + plot_boundary / 2))

    # Instantiate the optimiser class; use a distinct name rather than
    # rebinding the `optimiser` parameter (which holds the class).
    optim = optimiser(list(x), **kwargs)

    for _ in range(iters):
        objective = fn(*x)

        # Plot the current iterate before stepping, so the starting point is shown too.
        if input_dim == 1:
            plot_point_1D(ax, fn, x[0].item())
        elif input_dim == 2:
            plot_point_2D(ax, fn, x[0].item(), x[1].item())

        optim.zero_grad()
        objective.backward()
        optim.step()

    plt.ioff()
    return tuple(x_i.item() for x_i in x)
@@ -0,0 +1,29 @@
1
+ import torch
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+
5
def plot_objective_1D(ax, f, x_boundaries: tuple[float, float]) -> None:
    """
    Plot the 1-D objective `f` as a curve on `ax` over the interval `x_boundaries`.

    Arguments:
        `ax`: Matplotlib axes to draw on.
        `f`: Objective function; evaluated once on a float tensor of shape (100,)
            covering the interval (assumes `f` is elementwise — TODO confirm for
            non-elementwise objectives).
        `x_boundaries`: (min, max) of the plotted x range.
    """
    x = np.linspace(*x_boundaries, 100)
    # Evaluate f on the whole grid at once; detach because f's output may
    # participate in autograd.
    z = f(torch.from_numpy(x).float()).detach().numpy()
    ax.plot(x, z)
9
+
10
def plot_point_1D(ax, f, x_highlight: "float | None" = None):
    """
    Mark the current iterate on a 1-D objective plot and update the title.

    Arguments:
        `ax`: Matplotlib axes the objective curve was drawn on.
        `f`: Objective function, evaluated once at `x_highlight` for the marker height.
        `x_highlight`: Current parameter value; if None, nothing is drawn
            (the pause below still runs so the figure stays responsive).
    """
    if x_highlight is not None:
        z = f(torch.tensor([x_highlight])).item()
        ax.set_title(f"({x_highlight:.4f}) -> {z:.4f}")
        ax.scatter(x_highlight, z, marker='.', color='k', s=30)
    plt.pause(0.05)  # let the GUI event loop redraw the updated figure
16
+
17
def plot_objective_2D(ax, f, x_boundaries: tuple[float, float], y_boundaries: tuple[float, float]) -> None:
    """
    Plot the 2-D objective `f` as a translucent surface on the 3-D axes `ax`.

    Arguments:
        `ax`: Matplotlib 3-D axes to draw on.
        `f`: Objective function; evaluated once on two (100, 100) meshgrid tensors
            (assumes `f` is elementwise — TODO confirm for non-elementwise objectives).
        `x_boundaries`: (min, max) of the plotted x range (floats, matching the 1-D sibling).
        `y_boundaries`: (min, max) of the plotted y range.
    """
    x = np.linspace(*x_boundaries, 100)
    y = np.linspace(*y_boundaries, 100)
    X, Y = np.meshgrid(x, y)
    # Detach because f's output may participate in autograd.
    Z = f(torch.from_numpy(X).float(), torch.from_numpy(Y).float()).detach().numpy()
    # Low alpha keeps the optimisation trajectory visible through the surface.
    ax.plot_surface(X, Y, Z, cmap='plasma', alpha=0.3)
23
+
24
def plot_point_2D(ax, f, x_highlight: "float | None" = None, y_highlight: "float | None" = None):
    """
    Mark the current iterate on a 2-D objective surface and update the title.

    Arguments:
        `ax`: Matplotlib 3-D axes the objective surface was drawn on.
        `f`: Objective function, evaluated once at the highlighted point for the marker height.
        `x_highlight`: Current x parameter value.
        `y_highlight`: Current y parameter value. If either coordinate is None,
            nothing is drawn (the pause below still runs).
    """
    if x_highlight is not None and y_highlight is not None:
        z = f(torch.tensor([x_highlight]), torch.tensor([y_highlight])).item()
        ax.set_title(f"({x_highlight:.4f}, {y_highlight:.4f}) -> {z:.4f}")
        ax.scatter(x_highlight, y_highlight, z, marker='.', color='k', s=30)
    plt.pause(0.05)  # let the GUI event loop redraw the updated figure
@@ -0,0 +1,82 @@
1
+ Metadata-Version: 2.4
2
+ Name: optiviz
3
+ Version: 0.1.0
4
+ Summary: Educational tool for visualizing PyTorch-compatible optimisers on any differentiable 1D or 2D function.
5
+ Author-email: Ronit Kunkolienker <ronitkunk@gmail.com>
6
+ License: MIT License
7
+
8
+ Copyright (c) 2025 Ronit Kunkolienker
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
+ SOFTWARE.
27
+ Requires-Python: >=3.8
28
+ Description-Content-Type: text/markdown
29
+ License-File: LICENSE.txt
30
+ Requires-Dist: torch
31
+ Requires-Dist: matplotlib
32
+ Requires-Dist: numpy
33
+ Dynamic: license-file
34
+
35
+ # OptiViz
36
+ OptiViz enables effortless visualisation of the optimisation sequence of *any* PyTorch optimiser on *any* differentiable function in one or two variables. OptiViz might find educational use in introductory nonlinear optimisation or deep learning classes.
37
+
38
+ ![Vanilla gradient descent minimising a convex quadratic form.](sgd.png "SGD")
39
+
40
+ # Installation
41
+ To install OptiViz, please use:
42
+ ```sh
43
+ pip install optiviz
44
+ ```
45
+
46
+ # Usage
47
+ All functionality of OptiViz is exposed through the `optiviz.optimise` function.
48
+ ```python
49
+ import torch
50
+ from optiviz import optimise
51
+ ```
52
+ Any optimisation problem has an objective function. OptiViz works with differentiable, real-valued objective functions in one or two variables.
53
+ ```math
54
+ f : \mathbb{R} \rightarrow \mathbb{R}
55
+ ```
56
+ ```math
57
+ g : \mathbb{R}^2 \rightarrow \mathbb{R}
58
+ ```
59
+ In code, every input to and output of the objective function must be a `torch.Tensor` of shape `(1,)`.
60
+ ```python
61
+ def f(x: torch.Tensor) -> torch.Tensor:
62
+ """
63
+ Example of an objective function in one variable.
64
+ """
65
+ return x ** 2
66
+ def g(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
67
+ """
68
+ Example of an objective function in two variables.
69
+ """
70
+ return x ** 2 + y ** 2 + x.sin() * y.sin()
71
+ ```
72
+ The `optiviz.optimise` function (please see docstring) is used to visualise the optimisation sequence of the objective function using a PyTorch optimiser.
73
+ ```python
74
+ arg_g_min = optimise(
75
+ g, # objective function
76
+ (12.5, 12.5), # initial values of the parameters being adjusted
77
+ plot_boundary=25,
78
+ iters=100,
79
+ optimiser=torch.optim.Adam, # PyTorch-compatible optimiser
80
+ lr=5e-1
81
+ )
82
+ ```
@@ -0,0 +1,14 @@
1
+ LICENSE.txt
2
+ README.md
3
+ pyproject.toml
4
+ optiviz/__init__.py
5
+ optiviz/optimise.py
6
+ optiviz/visualise.py
7
+ optiviz.egg-info/PKG-INFO
8
+ optiviz.egg-info/SOURCES.txt
9
+ optiviz.egg-info/dependency_links.txt
10
+ optiviz.egg-info/requires.txt
11
+ optiviz.egg-info/top_level.txt
12
+ tests/tests_1d.py
13
+ tests/tests_2d.py
14
+ tests/tests_ols.py
@@ -0,0 +1,3 @@
1
+ torch
2
+ matplotlib
3
+ numpy
@@ -0,0 +1 @@
1
+ optiviz
@@ -0,0 +1,17 @@
1
+ [project]
2
+ name = "optiviz"
3
+ version = "0.1.0"
4
+ description = "Educational tool for visualizing PyTorch-compatible optimisers on any differentiable 1D or 2D function."
5
+ authors = [{ name = "Ronit Kunkolienker", email = "ronitkunk@gmail.com" }]
6
+ readme = "README.md"
7
+ license = { file = "LICENSE.txt" }
8
+ requires-python = ">=3.8"
9
+ dependencies = [
10
+ "torch",
11
+ "matplotlib",
12
+ "numpy",
13
+ ]
14
+
15
+ [build-system]
16
+ requires = ["setuptools", "wheel"]
17
+ build-backend = "setuptools.build_meta"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,30 @@
1
+ import torch
2
+ from optiviz import optimise
3
+
4
def f(x: torch.Tensor) -> torch.Tensor:
    """Convex quadratic with its unique minimum at x = 0."""
    return x * x
6
+
7
def g(x: torch.Tensor) -> torch.Tensor:
    """Quadratic plus a sin^2 ripple; global minimum at x = 0."""
    sin_x = torch.sin(x)
    return x * x + sin_x * sin_x
9
+
10
def test_f_minimizer_1d():
    """SGD on the convex quadratic should converge close to the minimiser at 0."""
    result = optimise(
        f,
        init_vector=(12.5,),
        plot_boundary=25,
        iters=100,
        optimiser=torch.optim.SGD,
        lr=1e-1,
    )
    (x_min,) = result
    assert abs(x_min) < 0.05
20
+
21
def test_g_minimizer_1d():
    """Adam on the rippled quadratic should still find the global minimum at 0."""
    result = optimise(
        g,
        init_vector=(12.5,),
        plot_boundary=25,
        iters=100,
        optimiser=torch.optim.Adam,
        lr=5e-1,
    )
    (x_min,) = result
    assert abs(x_min) < 0.05
@@ -0,0 +1,30 @@
1
+ import torch
2
+ from optiviz import optimise
3
+
4
def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Convex paraboloid with its unique minimum at (0, 0)."""
    return x * x + y * y
6
+
7
def g(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Paraboloid plus sin^2 ripples in each coordinate; global minimum at (0, 0)."""
    quadratic = x * x + y * y
    ripple = torch.sin(x) ** 2 + torch.sin(y) ** 2
    return quadratic + ripple
9
+
10
def test_f_minimizer_2d():
    """SGD on the paraboloid should converge close to the minimiser (0, 0)."""
    x_min, y_min = optimise(
        f,
        init_vector=(12.5, 12.5),
        plot_boundary=25,
        iters=100,
        optimiser=torch.optim.SGD,
        lr=1e-1,
    )
    assert abs(x_min) < 0.05
    assert abs(y_min) < 0.05
20
+
21
def test_g_minimizer_2d():
    """Adam on the rippled paraboloid should still find the global minimum (0, 0)."""
    x_min, y_min = optimise(
        g,
        init_vector=(12.5, 12.5),
        plot_boundary=25,
        iters=100,
        optimiser=torch.optim.Adam,
        lr=5e-1,
    )
    assert abs(x_min) < 0.05
    assert abs(y_min) < 0.05
@@ -0,0 +1,20 @@
1
+ import torch
2
+ from optiviz import optimise
3
+
4
X = [-2.0, -1.0, 0.0, 1.0, 2.0]
Y = [2.0, 3.0, 4.0, 5.0, 6.0]  # exactly y = x + 4, so the OLS optimum is w=1, b=4

def MSE(w: torch.Tensor, b: torch.Tensor, x=torch.tensor(X), y=torch.tensor(Y)):
    """
    Half mean-squared error of the line w*x + b against targets y.

    Arguments:
        `w`, `b`: Shape-(1,) parameter tensors being optimised.
        `x`, `y`: Fixed data tensors, bound as defaults (never mutated) so that
            `optimise` counts only `w` and `b` as non-default arguments.

    Returns:
        Shape-(1,) tensor holding the half-MSE.
    """
    # Vectorised over the whole dataset instead of a Python loop over indices;
    # keepdim=True preserves the original (1,) output shape.
    return ((w * x + b - y) ** 2).mean(dim=0, keepdim=True) / 2
10
+
11
def test_ols_optimisation():
    """SGD on the OLS objective should recover the exact fit w=1, b=4."""
    w_opt, b_opt = optimise(
        MSE,
        (12.5, 12.5),
        plot_boundary=50,
        iters=100,
        optimiser=torch.optim.SGD,
        lr=1e-1,
    )
    assert abs(w_opt - 1) < 0.05
    assert abs(b_opt - 4) < 0.05