liteopt 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- liteopt-0.1.0/Cargo.lock +177 -0
- liteopt-0.1.0/Cargo.toml +3 -0
- liteopt-0.1.0/PKG-INFO +46 -0
- liteopt-0.1.0/README.md +29 -0
- liteopt-0.1.0/liteopt-core/Cargo.toml +8 -0
- liteopt-0.1.0/liteopt-core/src/bin/quadratic.rs +39 -0
- liteopt-0.1.0/liteopt-core/src/lib.rs +350 -0
- liteopt-0.1.0/liteopt-py/Cargo.toml +13 -0
- liteopt-0.1.0/liteopt-py/README.md +29 -0
- liteopt-0.1.0/liteopt-py/src/lib.rs +72 -0
- liteopt-0.1.0/liteopt-py/tests/test_liteopt.py +27 -0
- liteopt-0.1.0/pyproject.toml +33 -0
liteopt-0.1.0/Cargo.lock
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
# This file is automatically @generated by Cargo.
|
|
2
|
+
# It is not intended for manual editing.
|
|
3
|
+
version = 4
|
|
4
|
+
|
|
5
|
+
[[package]]
|
|
6
|
+
name = "autocfg"
|
|
7
|
+
version = "1.5.0"
|
|
8
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
9
|
+
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
|
|
10
|
+
|
|
11
|
+
[[package]]
|
|
12
|
+
name = "heck"
|
|
13
|
+
version = "0.5.0"
|
|
14
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
15
|
+
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
|
|
16
|
+
|
|
17
|
+
[[package]]
|
|
18
|
+
name = "indoc"
|
|
19
|
+
version = "2.0.7"
|
|
20
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
21
|
+
checksum = "79cf5c93f93228cf8efb3ba362535fb11199ac548a09ce117c9b1adc3030d706"
|
|
22
|
+
dependencies = [
|
|
23
|
+
"rustversion",
|
|
24
|
+
]
|
|
25
|
+
|
|
26
|
+
[[package]]
|
|
27
|
+
name = "libc"
|
|
28
|
+
version = "0.2.178"
|
|
29
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
30
|
+
checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091"
|
|
31
|
+
|
|
32
|
+
[[package]]
|
|
33
|
+
name = "liteopt"
|
|
34
|
+
version = "0.1.0"
|
|
35
|
+
|
|
36
|
+
[[package]]
|
|
37
|
+
name = "liteopt-py"
|
|
38
|
+
version = "0.1.0"
|
|
39
|
+
dependencies = [
|
|
40
|
+
"liteopt",
|
|
41
|
+
"pyo3",
|
|
42
|
+
]
|
|
43
|
+
|
|
44
|
+
[[package]]
|
|
45
|
+
name = "memoffset"
|
|
46
|
+
version = "0.9.1"
|
|
47
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
48
|
+
checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a"
|
|
49
|
+
dependencies = [
|
|
50
|
+
"autocfg",
|
|
51
|
+
]
|
|
52
|
+
|
|
53
|
+
[[package]]
|
|
54
|
+
name = "once_cell"
|
|
55
|
+
version = "1.21.3"
|
|
56
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
57
|
+
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
|
|
58
|
+
|
|
59
|
+
[[package]]
|
|
60
|
+
name = "portable-atomic"
|
|
61
|
+
version = "1.11.1"
|
|
62
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
63
|
+
checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483"
|
|
64
|
+
|
|
65
|
+
[[package]]
|
|
66
|
+
name = "proc-macro2"
|
|
67
|
+
version = "1.0.103"
|
|
68
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
69
|
+
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
|
|
70
|
+
dependencies = [
|
|
71
|
+
"unicode-ident",
|
|
72
|
+
]
|
|
73
|
+
|
|
74
|
+
[[package]]
|
|
75
|
+
name = "pyo3"
|
|
76
|
+
version = "0.27.2"
|
|
77
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
78
|
+
checksum = "ab53c047fcd1a1d2a8820fe84f05d6be69e9526be40cb03b73f86b6b03e6d87d"
|
|
79
|
+
dependencies = [
|
|
80
|
+
"indoc",
|
|
81
|
+
"libc",
|
|
82
|
+
"memoffset",
|
|
83
|
+
"once_cell",
|
|
84
|
+
"portable-atomic",
|
|
85
|
+
"pyo3-build-config",
|
|
86
|
+
"pyo3-ffi",
|
|
87
|
+
"pyo3-macros",
|
|
88
|
+
"unindent",
|
|
89
|
+
]
|
|
90
|
+
|
|
91
|
+
[[package]]
|
|
92
|
+
name = "pyo3-build-config"
|
|
93
|
+
version = "0.27.2"
|
|
94
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
95
|
+
checksum = "b455933107de8642b4487ed26d912c2d899dec6114884214a0b3bb3be9261ea6"
|
|
96
|
+
dependencies = [
|
|
97
|
+
"target-lexicon",
|
|
98
|
+
]
|
|
99
|
+
|
|
100
|
+
[[package]]
|
|
101
|
+
name = "pyo3-ffi"
|
|
102
|
+
version = "0.27.2"
|
|
103
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
104
|
+
checksum = "1c85c9cbfaddf651b1221594209aed57e9e5cff63c4d11d1feead529b872a089"
|
|
105
|
+
dependencies = [
|
|
106
|
+
"libc",
|
|
107
|
+
"pyo3-build-config",
|
|
108
|
+
]
|
|
109
|
+
|
|
110
|
+
[[package]]
|
|
111
|
+
name = "pyo3-macros"
|
|
112
|
+
version = "0.27.2"
|
|
113
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
114
|
+
checksum = "0a5b10c9bf9888125d917fb4d2ca2d25c8df94c7ab5a52e13313a07e050a3b02"
|
|
115
|
+
dependencies = [
|
|
116
|
+
"proc-macro2",
|
|
117
|
+
"pyo3-macros-backend",
|
|
118
|
+
"quote",
|
|
119
|
+
"syn",
|
|
120
|
+
]
|
|
121
|
+
|
|
122
|
+
[[package]]
|
|
123
|
+
name = "pyo3-macros-backend"
|
|
124
|
+
version = "0.27.2"
|
|
125
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
126
|
+
checksum = "03b51720d314836e53327f5871d4c0cfb4fb37cc2c4a11cc71907a86342c40f9"
|
|
127
|
+
dependencies = [
|
|
128
|
+
"heck",
|
|
129
|
+
"proc-macro2",
|
|
130
|
+
"pyo3-build-config",
|
|
131
|
+
"quote",
|
|
132
|
+
"syn",
|
|
133
|
+
]
|
|
134
|
+
|
|
135
|
+
[[package]]
|
|
136
|
+
name = "quote"
|
|
137
|
+
version = "1.0.42"
|
|
138
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
139
|
+
checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
|
|
140
|
+
dependencies = [
|
|
141
|
+
"proc-macro2",
|
|
142
|
+
]
|
|
143
|
+
|
|
144
|
+
[[package]]
|
|
145
|
+
name = "rustversion"
|
|
146
|
+
version = "1.0.22"
|
|
147
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
148
|
+
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
|
|
149
|
+
|
|
150
|
+
[[package]]
|
|
151
|
+
name = "syn"
|
|
152
|
+
version = "2.0.111"
|
|
153
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
154
|
+
checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87"
|
|
155
|
+
dependencies = [
|
|
156
|
+
"proc-macro2",
|
|
157
|
+
"quote",
|
|
158
|
+
"unicode-ident",
|
|
159
|
+
]
|
|
160
|
+
|
|
161
|
+
[[package]]
|
|
162
|
+
name = "target-lexicon"
|
|
163
|
+
version = "0.13.3"
|
|
164
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
165
|
+
checksum = "df7f62577c25e07834649fc3b39fafdc597c0a3527dc1c60129201ccfcbaa50c"
|
|
166
|
+
|
|
167
|
+
[[package]]
|
|
168
|
+
name = "unicode-ident"
|
|
169
|
+
version = "1.0.22"
|
|
170
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
171
|
+
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
|
|
172
|
+
|
|
173
|
+
[[package]]
|
|
174
|
+
name = "unindent"
|
|
175
|
+
version = "0.2.4"
|
|
176
|
+
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
177
|
+
checksum = "7264e107f553ccae879d21fbea1d6724ac785e8c3bfc762137959b5802826ef3"
|
liteopt-0.1.0/Cargo.toml
ADDED
liteopt-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: liteopt
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Classifier: Programming Language :: Python
|
|
5
|
+
Classifier: Programming Language :: Python :: 3
|
|
6
|
+
Classifier: Programming Language :: Rust
|
|
7
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
8
|
+
Classifier: Operating System :: OS Independent
|
|
9
|
+
Requires-Dist: numpy>=1.24.4
|
|
10
|
+
Summary: Lightweight gradient descent optimizer implemented in Rust
|
|
11
|
+
Author-email: Taiki Ishigaki <taiki000ishigaki@gmail.com>
|
|
12
|
+
License: MIT
|
|
13
|
+
Requires-Python: >=3.8
|
|
14
|
+
Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
|
|
15
|
+
Project-URL: Homepage, https://github.com/MathRobotics/liteopt
|
|
16
|
+
Project-URL: Repository, https://github.com/MathRobotics/liteopt
|
|
17
|
+
|
|
18
|
+
# liteopt
|
|
19
|
+
|
|
20
|
+
A lightweight optimization library written in Rust with Python bindings.
|
|
21
|
+
|
|
22
|
+
## Installation
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
pip install liteopt
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
## Usage
|
|
29
|
+
|
|
30
|
+
```python
|
|
31
|
+
import liteopt
|
|
32
|
+
|
|
33
|
+
def f(x):
|
|
34
|
+
x0, x1 = x
|
|
35
|
+
return (1.0 - x0)**2 + 100.0 * (x1 - x0**2)**2
|
|
36
|
+
|
|
37
|
+
def grad(x):
|
|
38
|
+
x0, x1 = x
|
|
39
|
+
df_dx = -2.0 * (1.0 - x0) - 400.0 * x0 * (x1 - x0**2)
|
|
40
|
+
df_dy = 200.0 * (x1 - x0**2)
|
|
41
|
+
return [df_dx, df_dy]
|
|
42
|
+
|
|
43
|
+
x0 = [-1.2, 1.0]
|
|
44
|
+
x_star, f_star, converged = liteopt.gd(f, grad, x0, step_size=1e-3, max_iters=200_000, tol_grad=1e-4)
|
|
45
|
+
print(converged, x_star, f_star)
|
|
46
|
+
```
|
liteopt-0.1.0/README.md
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# liteopt
|
|
2
|
+
|
|
3
|
+
A lightweight optimization library written in Rust with Python bindings.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install liteopt
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
import liteopt
|
|
15
|
+
|
|
16
|
+
def f(x):
|
|
17
|
+
x0, x1 = x
|
|
18
|
+
return (1.0 - x0)**2 + 100.0 * (x1 - x0**2)**2
|
|
19
|
+
|
|
20
|
+
def grad(x):
|
|
21
|
+
x0, x1 = x
|
|
22
|
+
df_dx = -2.0 * (1.0 - x0) - 400.0 * x0 * (x1 - x0**2)
|
|
23
|
+
df_dy = 200.0 * (x1 - x0**2)
|
|
24
|
+
return [df_dx, df_dy]
|
|
25
|
+
|
|
26
|
+
x0 = [-1.2, 1.0]
|
|
27
|
+
x_star, f_star, converged = liteopt.gd(f, grad, x0, step_size=1e-3, max_iters=200_000, tol_grad=1e-4)
|
|
28
|
+
print(converged, x_star, f_star)
|
|
29
|
+
```
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
use liteopt::{EuclideanSpace, GradientDescent};
|
|
2
|
+
|
|
3
|
+
fn main() {
|
|
4
|
+
let space = EuclideanSpace;
|
|
5
|
+
|
|
6
|
+
let solver = GradientDescent {
|
|
7
|
+
space,
|
|
8
|
+
step_size: 1e-3,
|
|
9
|
+
max_iters: 200_000,
|
|
10
|
+
tol_grad: 1e-4,
|
|
11
|
+
};
|
|
12
|
+
|
|
13
|
+
// initial point
|
|
14
|
+
let x0 = vec![-1.2, 1.0];
|
|
15
|
+
|
|
16
|
+
// objective function f(x, y) = (1 - x)^2 + 100 (y - x^2)^2
|
|
17
|
+
let value_fn = |x: &Vec<f64>| {
|
|
18
|
+
let x0 = x[0];
|
|
19
|
+
let x1 = x[1];
|
|
20
|
+
(1.0 - x0).powi(2) + 100.0 * (x1 - x0 * x0).powi(2)
|
|
21
|
+
};
|
|
22
|
+
|
|
23
|
+
// gradient of the objective function
|
|
24
|
+
let grad_fn = |x: &Vec<f64>, grad: &mut Vec<f64>| {
|
|
25
|
+
let x0 = x[0];
|
|
26
|
+
let x1 = x[1];
|
|
27
|
+
|
|
28
|
+
grad[0] = -2.0 * (1.0 - x0) - 400.0 * x0 * (x1 - x0 * x0);
|
|
29
|
+
grad[1] = 200.0 * (x1 - x0 * x0);
|
|
30
|
+
};
|
|
31
|
+
|
|
32
|
+
let result = solver.minimize_with_fn(x0, value_fn, grad_fn);
|
|
33
|
+
|
|
34
|
+
println!("converged: {}", result.converged);
|
|
35
|
+
println!("iters : {}", result.iters);
|
|
36
|
+
println!("x* : {:?}", result.x);
|
|
37
|
+
println!("f(x*) : {}", result.f);
|
|
38
|
+
println!("‖grad‖ : {}", result.grad_norm);
|
|
39
|
+
}
|
|
@@ -0,0 +1,350 @@
|
|
|
1
|
+
//! liteopt: A tiny, lightweight optimization toolbox
|
|
2
|
+
//!
|
|
3
|
+
//! - `Space`: an abstraction of vector spaces
|
|
4
|
+
//! - `EuclideanSpace` (`Vec<f64>`): its concrete implementation
|
|
5
|
+
//! - `Objective`: a generic objective function interface
|
|
6
|
+
//! - `GradientDescent`: a gradient descent solver
|
|
7
|
+
//!
|
|
8
|
+
//! Start with simple optimization on R^n.
|
|
9
|
+
|
|
10
|
+
/// Abstraction of the "space" a solver operates on.
///
/// Only `EuclideanSpace` (`Vec<f64>`) is implemented for now; the trait
/// leaves room for future manifolds such as SO(3) or SE(3).
pub trait Space {
    /// Type representing points/vectors on the space.
    type Point: Clone;

    /// Return a zero-like vector with the same shape as `x`.
    fn zero_like(&self, x: &Self::Point) -> Self::Point;

    /// Vector norm.
    fn norm(&self, v: &Self::Point) -> f64;

    /// Return `v` scaled by the scalar `alpha`.
    fn scale(&self, v: &Self::Point, alpha: f64) -> Self::Point;

    /// Compute `x + v` (result is a point).
    fn add(&self, x: &Self::Point, v: &Self::Point) -> Self::Point;

    /// Compute `y - x` (result is a vector).
    fn difference(&self, x: &Self::Point, y: &Self::Point) -> Self::Point;

    /// Return the point reached by moving from `x` along `direction` by
    /// step `alpha`.
    ///
    /// The default corresponds to the Euclidean update
    /// `x_{k+1} = x_k + alpha * direction`.
    fn retract(&self, x: &Self::Point, direction: &Self::Point, alpha: f64) -> Self::Point {
        self.add(x, &self.scale(direction, alpha))
    }
}
|
|
43
|
+
|
|
44
|
+
/// Simple Euclidean space representing R^n as Vec<f64>.
|
|
45
|
+
#[derive(Clone, Copy, Debug, Default)]
|
|
46
|
+
pub struct EuclideanSpace;
|
|
47
|
+
|
|
48
|
+
impl Space for EuclideanSpace {
|
|
49
|
+
type Point = Vec<f64>;
|
|
50
|
+
|
|
51
|
+
fn zero_like(&self, x: &Self::Point) -> Self::Point {
|
|
52
|
+
vec![0.0; x.len()]
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
fn norm(&self, v: &Self::Point) -> f64 {
|
|
56
|
+
v.iter().map(|vi| vi * vi).sum::<f64>().sqrt()
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
fn scale(&self, v: &Self::Point, alpha: f64) -> Self::Point {
|
|
60
|
+
v.iter().map(|vi| alpha * vi).collect()
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
fn add(&self, x: &Self::Point, v: &Self::Point) -> Self::Point {
|
|
64
|
+
x.iter().zip(v.iter()).map(|(xi, vi)| xi + vi).collect()
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
fn difference(&self, x: &Self::Point, y: &Self::Point) -> Self::Point {
|
|
68
|
+
y.iter().zip(x.iter()).map(|(yi, xi)| yi - xi).collect()
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
/// Objective function to be minimized.
///
/// - `S::Point` represents points on the space.
/// - `gradient` writes the result into a caller-provided buffer, which
///   avoids allocating a fresh vector on every solver iteration.
pub trait Objective<S: Space> {
    /// Function value f(x) at x.
    fn value(&self, x: &S::Point) -> f64;

    /// Write the gradient ∇f(x) at x into `grad`.
    ///
    /// `grad` is assumed to be pre-initialized with the right shape,
    /// e.g., via `Space::zero_like(x)`.
    fn gradient(&self, x: &S::Point, grad: &mut S::Point);
}
|
|
85
|
+
|
|
86
|
+
/// Configuration for gradient descent.
#[derive(Clone, Debug)]
pub struct GradientDescent<S: Space> {
    /// Space to operate on (the MVP fixes this to `EuclideanSpace`).
    pub space: S,
    /// Learning rate / step size (constant; no line search is performed).
    pub step_size: f64,
    /// Maximum number of iterations before giving up.
    pub max_iters: usize,
    /// Considered converged when the gradient norm falls below this threshold.
    pub tol_grad: f64,
}
|
|
98
|
+
|
|
99
|
+
/// Struct that holds the optimization result.
#[derive(Clone, Debug)]
pub struct OptimizeResult<P> {
    /// Final iterate.
    pub x: P,
    /// Objective value at `x`.
    pub f: f64,
    /// Number of iterations performed before stopping.
    pub iters: usize,
    /// Gradient norm at termination.
    pub grad_norm: f64,
    /// True when the gradient norm dropped below the tolerance;
    /// false when the iteration budget was exhausted.
    pub converged: bool,
}
|
|
108
|
+
|
|
109
|
+
impl<S: Space> GradientDescent<S> {
|
|
110
|
+
pub fn minimize<O>(&self, obj: &O, mut x: S::Point) -> OptimizeResult<S::Point>
|
|
111
|
+
where
|
|
112
|
+
O: Objective<S>,
|
|
113
|
+
{
|
|
114
|
+
let mut grad = self.space.zero_like(&x);
|
|
115
|
+
|
|
116
|
+
for k in 0..self.max_iters {
|
|
117
|
+
// Compute gradient.
|
|
118
|
+
obj.gradient(&x, &mut grad);
|
|
119
|
+
|
|
120
|
+
let grad_norm = self.space.norm(&grad);
|
|
121
|
+
if grad_norm < self.tol_grad {
|
|
122
|
+
let f = obj.value(&x);
|
|
123
|
+
return OptimizeResult {
|
|
124
|
+
x,
|
|
125
|
+
f,
|
|
126
|
+
iters: k,
|
|
127
|
+
grad_norm,
|
|
128
|
+
converged: true,
|
|
129
|
+
};
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
// x_{k+1} = Retr_x( - step_size * grad )
|
|
133
|
+
// direction = -grad
|
|
134
|
+
let direction = self.space.scale(&grad, -1.0);
|
|
135
|
+
x = self.space.retract(&x, &direction, self.step_size);
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
let f = obj.value(&x);
|
|
139
|
+
let grad_norm = self.space.norm(&grad);
|
|
140
|
+
OptimizeResult {
|
|
141
|
+
x,
|
|
142
|
+
f,
|
|
143
|
+
iters: self.max_iters,
|
|
144
|
+
grad_norm,
|
|
145
|
+
converged: false,
|
|
146
|
+
}
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
/// ★ Minimize using user-provided value and gradient functions.
|
|
150
|
+
pub fn minimize_with_fn<F, G>(
|
|
151
|
+
&self,
|
|
152
|
+
mut x: S::Point,
|
|
153
|
+
value_fn: F,
|
|
154
|
+
grad_fn: G,
|
|
155
|
+
) -> OptimizeResult<S::Point>
|
|
156
|
+
where
|
|
157
|
+
F: Fn(&S::Point) -> f64,
|
|
158
|
+
G: Fn(&S::Point, &mut S::Point),
|
|
159
|
+
{
|
|
160
|
+
let mut grad = self.space.zero_like(&x);
|
|
161
|
+
|
|
162
|
+
for k in 0..self.max_iters {
|
|
163
|
+
// call the user-provided gradient function
|
|
164
|
+
grad_fn(&x, &mut grad);
|
|
165
|
+
|
|
166
|
+
let grad_norm = self.space.norm(&grad);
|
|
167
|
+
if grad_norm < self.tol_grad {
|
|
168
|
+
let f = value_fn(&x);
|
|
169
|
+
return OptimizeResult {
|
|
170
|
+
x,
|
|
171
|
+
f,
|
|
172
|
+
iters: k,
|
|
173
|
+
grad_norm,
|
|
174
|
+
converged: true,
|
|
175
|
+
};
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
let direction = self.space.scale(&grad, -1.0);
|
|
179
|
+
x = self.space.retract(&x, &direction, self.step_size);
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
let f = value_fn(&x);
|
|
183
|
+
let grad_norm = self.space.norm(&grad);
|
|
184
|
+
OptimizeResult {
|
|
185
|
+
x,
|
|
186
|
+
f,
|
|
187
|
+
iters: self.max_iters,
|
|
188
|
+
grad_norm,
|
|
189
|
+
converged: false,
|
|
190
|
+
}
|
|
191
|
+
}
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
//
|
|
195
|
+
// Tests and examples: quadratic / Rosenbrock
|
|
196
|
+
//
|
|
197
|
+
|
|
198
|
+
/// Example quadratic of the form f(x) = 0.5 * x^T A x - b^T x.
|
|
199
|
+
pub struct Quadratic {
|
|
200
|
+
pub a: f64,
|
|
201
|
+
pub b: f64,
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
impl Objective<EuclideanSpace> for Quadratic {
|
|
205
|
+
fn value(&self, x: &Vec<f64>) -> f64 {
|
|
206
|
+
// Treat as 1D for simplicity:
|
|
207
|
+
// f(x) = 0.5 * a * x^2 - b * x
|
|
208
|
+
let x0 = x[0];
|
|
209
|
+
0.5 * self.a * x0 * x0 - self.b * x0
|
|
210
|
+
}
|
|
211
|
+
|
|
212
|
+
fn gradient(&self, x: &Vec<f64>, grad: &mut Vec<f64>) {
|
|
213
|
+
let x0 = x[0];
|
|
214
|
+
// Gradient: df/dx = a * x - b
|
|
215
|
+
grad[0] = self.a * x0 - self.b;
|
|
216
|
+
}
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
/// Example 2D Rosenbrock function.
|
|
220
|
+
/// f(x, y) = (1 - x)^2 + 100 (y - x^2)^2
|
|
221
|
+
pub struct Rosenbrock;
|
|
222
|
+
|
|
223
|
+
impl Objective<EuclideanSpace> for Rosenbrock {
|
|
224
|
+
fn value(&self, x: &Vec<f64>) -> f64 {
|
|
225
|
+
let x0 = x[0];
|
|
226
|
+
let x1 = x[1];
|
|
227
|
+
(1.0 - x0).powi(2) + 100.0 * (x1 - x0 * x0).powi(2)
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
fn gradient(&self, x: &Vec<f64>, grad: &mut Vec<f64>) {
|
|
231
|
+
let x0 = x[0];
|
|
232
|
+
let x1 = x[1];
|
|
233
|
+
|
|
234
|
+
// df/dx = -2(1 - x) - 400x(y - x^2)
|
|
235
|
+
grad[0] = -2.0 * (1.0 - x0) - 400.0 * x0 * (x1 - x0 * x0);
|
|
236
|
+
// df/dy = 200(y - x^2)
|
|
237
|
+
grad[1] = 200.0 * (x1 - x0 * x0);
|
|
238
|
+
}
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn quadratic_minimization() {
        // f(x) = 0.5 * a x^2 - b x has its unique minimizer at x* = b / a.
        let objective = Quadratic { a: 2.0, b: 4.0 }; // f(x) = x^2 - 4x => x* = 2
        let solver = GradientDescent {
            space: EuclideanSpace,
            step_size: 0.1,
            max_iters: 1000,
            tol_grad: 1e-6,
        };

        let outcome = solver.minimize(&objective, vec![0.0]);

        assert!(outcome.converged);
        assert!((outcome.x[0] - 2.0).abs() < 1e-3);
    }

    #[test]
    fn rosenbrock_minimization() {
        let objective = Rosenbrock;
        let solver = GradientDescent {
            space: EuclideanSpace,
            step_size: 1e-3,
            max_iters: 200_000,
            tol_grad: 1e-4,
        };

        let outcome = solver.minimize(&objective, vec![-1.2, 1.0]);

        // True minimizer is (1,1)
        assert!((outcome.x[0] - 1.0).abs() < 5e-2);
        assert!((outcome.x[1] - 1.0).abs() < 5e-2);
    }

    #[test]
    fn nonlinear_minimization_with_fn() {
        use std::f64::consts::PI;

        let solver = GradientDescent {
            space: EuclideanSpace,
            step_size: 1e-3,
            max_iters: 200_000,
            tol_grad: 1e-4,
        };

        // objective map
        // p = [ cos(x) + cos(x+y)
        //       sin(x) + sin(x+y) ]
        let p_fn = |q: &Vec<f64>| {
            vec![
                f64::cos(q[0]) + f64::cos(q[0] + q[1]),
                f64::sin(q[0]) + f64::sin(q[0] + q[1]),
            ]
        };
        // Jacobian dp/d(x, y) of the map above.
        let dp_fn = |q: &Vec<f64>| {
            vec![
                vec![
                    -(f64::sin(q[0]) + f64::sin(q[0] + q[1])),
                    -f64::sin(q[0] + q[1]),
                ],
                vec![
                    f64::cos(q[0]) + f64::cos(q[0] + q[1]),
                    -f64::sin(q[0] + q[1]),
                ],
            ]
        };
        let target = vec![0.5, (f64::sqrt(3.0) + 2.0) / 2.0];

        // Least-squares cost 0.5 * ||p(x) - target||^2.
        let value_fn = |q: &Vec<f64>| {
            let p = p_fn(q);
            let r = [p[0] - target[0], p[1] - target[1]];
            0.5 * (r[0].powi(2) + r[1].powi(2))
        };

        // Chain rule: grad = J^T r.
        let grad_fn = |q: &Vec<f64>, g: &mut Vec<f64>| {
            let p = p_fn(q);
            let r = [p[0] - target[0], p[1] - target[1]];
            let jac = dp_fn(q);
            g[0] = r[0] * jac[0][0] + r[1] * jac[1][0];
            g[1] = r[0] * jac[0][1] + r[1] * jac[1][1];
        };

        let outcome = solver.minimize_with_fn(vec![0.0, 0.0], value_fn, grad_fn);

        // True minimizer is (pi/3, pi/6)
        assert!((outcome.x[0] - PI / 3.0).abs() < 1e-3);
        assert!((outcome.x[1] - PI / 6.0).abs() < 1e-3);
    }
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
[package]
|
|
2
|
+
name = "liteopt-py"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
edition = "2021"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
|
|
7
|
+
[lib]
|
|
8
|
+
name = "liteopt"
|
|
9
|
+
crate-type = ["cdylib"]
|
|
10
|
+
|
|
11
|
+
[dependencies]
|
|
12
|
+
liteopt_core = { package = "liteopt", path = "../liteopt-core" }
|
|
13
|
+
pyo3 = { version = "0.27", features = ["extension-module"] }
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# liteopt
|
|
2
|
+
|
|
3
|
+
A lightweight optimization library written in Rust with Python bindings.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install liteopt
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
import liteopt
|
|
15
|
+
|
|
16
|
+
def f(x):
|
|
17
|
+
x0, x1 = x
|
|
18
|
+
return (1.0 - x0)**2 + 100.0 * (x1 - x0**2)**2
|
|
19
|
+
|
|
20
|
+
def grad(x):
|
|
21
|
+
x0, x1 = x
|
|
22
|
+
df_dx = -2.0 * (1.0 - x0) - 400.0 * x0 * (x1 - x0**2)
|
|
23
|
+
df_dy = 200.0 * (x1 - x0**2)
|
|
24
|
+
return [df_dx, df_dy]
|
|
25
|
+
|
|
26
|
+
x0 = [-1.2, 1.0]
|
|
27
|
+
x_star, f_star, converged = liteopt.gd(f, grad, x0, step_size=1e-3, max_iters=200_000, tol_grad=1e-4)
|
|
28
|
+
print(converged, x_star, f_star)
|
|
29
|
+
```
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
use liteopt_core::{EuclideanSpace, GradientDescent};
|
|
2
|
+
use pyo3::prelude::*;
|
|
3
|
+
|
|
4
|
+
/// Gradient Descent optimizer exposed to Python.
|
|
5
|
+
///
|
|
6
|
+
/// f: callable(x: list[float]) -> float
|
|
7
|
+
/// grad: callable(x: list[float]) -> list[float]
|
|
8
|
+
#[pyfunction]
|
|
9
|
+
fn gd(
|
|
10
|
+
py: Python<'_>,
|
|
11
|
+
f: Py<PyAny>,
|
|
12
|
+
grad: Py<PyAny>,
|
|
13
|
+
x0: Vec<f64>,
|
|
14
|
+
step_size: f64,
|
|
15
|
+
max_iters: usize,
|
|
16
|
+
tol_grad: f64,
|
|
17
|
+
) -> PyResult<(Vec<f64>, f64, bool)> {
|
|
18
|
+
let space = EuclideanSpace;
|
|
19
|
+
let solver = GradientDescent {
|
|
20
|
+
space,
|
|
21
|
+
step_size,
|
|
22
|
+
max_iters,
|
|
23
|
+
tol_grad,
|
|
24
|
+
};
|
|
25
|
+
|
|
26
|
+
let f_obj = f.clone_ref(py);
|
|
27
|
+
let grad_obj = grad.clone_ref(py);
|
|
28
|
+
|
|
29
|
+
// closure for calling Python function f(x)
|
|
30
|
+
let f_closure = move |x: &Vec<f64>| -> f64 {
|
|
31
|
+
let arg = x.clone();
|
|
32
|
+
let res = f_obj
|
|
33
|
+
.call1(py, (arg,))
|
|
34
|
+
.expect("failed to call objective function from Python");
|
|
35
|
+
res.extract::<f64>(py)
|
|
36
|
+
.expect("objective function must return float")
|
|
37
|
+
};
|
|
38
|
+
|
|
39
|
+
// closure for calling Python gradient function grad(x)
|
|
40
|
+
let grad_closure = move |x: &Vec<f64>, grad_out: &mut Vec<f64>| {
|
|
41
|
+
let arg = x.clone();
|
|
42
|
+
let res = grad_obj
|
|
43
|
+
.call1(py, (arg,))
|
|
44
|
+
.expect("failed to call gradient function from Python");
|
|
45
|
+
let g: Vec<f64> = res
|
|
46
|
+
.extract(py)
|
|
47
|
+
.expect("gradient function must return list[float]");
|
|
48
|
+
|
|
49
|
+
assert_eq!(
|
|
50
|
+
g.len(),
|
|
51
|
+
grad_out.len(),
|
|
52
|
+
"gradient length mismatch: expected {}, got {}",
|
|
53
|
+
grad_out.len(),
|
|
54
|
+
g.len()
|
|
55
|
+
);
|
|
56
|
+
|
|
57
|
+
for (o, gi) in grad_out.iter_mut().zip(g.iter()) {
|
|
58
|
+
*o = *gi;
|
|
59
|
+
}
|
|
60
|
+
};
|
|
61
|
+
|
|
62
|
+
let result = solver.minimize_with_fn(x0, f_closure, grad_closure);
|
|
63
|
+
|
|
64
|
+
Ok((result.x, result.f, result.converged))
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
/// Python module definition: registers `gd` so it is callable as
/// `liteopt.gd(...)` from Python.
#[pymodule]
fn liteopt(_py: Python, m: &Bound<PyModule>) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(gd, m)?)?;
    Ok(())
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import liteopt
|
|
3
|
+
|
|
4
|
+
def f(x):
    """Rosenbrock objective (1 - x0)^2 + 100 (x1 - x0^2)^2 as a plain float."""
    arr = np.asarray(x)
    first, second = arr[0], arr[1]
    return float((1.0 - first) ** 2 + 100.0 * (second - first ** 2) ** 2)
|
|
7
|
+
|
|
8
|
+
def grad(x):
    """Analytic gradient of the Rosenbrock objective as a 2-element list of floats."""
    arr = np.asarray(x)
    first, second = arr[0], arr[1]
    inner = second - first ** 2
    d0 = -2.0 * (1.0 - first) - 400.0 * first * inner
    d1 = 200.0 * inner
    return [float(d0), float(d1)]
|
|
13
|
+
|
|
14
|
+
def main():
    """Run liteopt's gradient descent on the Rosenbrock function and print the result."""
    start = [-1.2, 1.0]
    x_star, f_star, converged = liteopt.gd(
        f, grad, start,
        step_size=1e-3,
        max_iters=200_000,
        tol_grad=1e-10,
    )
    print("converged:", converged)
    print("x*:", x_star, "f(x*):", f_star)
    print("expected x*:", [1.0, 1.0], "f(x*):", f([1.0, 1.0]))


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["maturin>=1.5,<2.0"]
|
|
3
|
+
build-backend = "maturin"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "liteopt"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Lightweight gradient descent optimizer implemented in Rust"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.8"
|
|
11
|
+
license = { text = "MIT" }
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "Taiki Ishigaki", email = "taiki000ishigaki@gmail.com" }
|
|
14
|
+
]
|
|
15
|
+
classifiers = [
|
|
16
|
+
"Programming Language :: Python",
|
|
17
|
+
"Programming Language :: Python :: 3",
|
|
18
|
+
"Programming Language :: Rust",
|
|
19
|
+
"License :: OSI Approved :: MIT License",
|
|
20
|
+
"Operating System :: OS Independent",
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
dependencies = [
|
|
24
|
+
"numpy>=1.24.4",
|
|
25
|
+
]
|
|
26
|
+
|
|
27
|
+
[project.urls]
|
|
28
|
+
Homepage = "https://github.com/MathRobotics/liteopt"
|
|
29
|
+
Repository = "https://github.com/MathRobotics/liteopt"
|
|
30
|
+
|
|
31
|
+
[tool.maturin]
|
|
32
|
+
manifest-path = "liteopt-py/Cargo.toml"
|
|
33
|
+
# module-name = "liteopt"
|