scs 0.2.3 → 0.3.0

Files changed (100)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +4 -0
  3. data/README.md +11 -6
  4. data/lib/scs/ffi.rb +30 -13
  5. data/lib/scs/solver.rb +32 -9
  6. data/lib/scs/version.rb +1 -1
  7. data/vendor/scs/CITATION.cff +39 -0
  8. data/vendor/scs/CMakeLists.txt +7 -8
  9. data/vendor/scs/Makefile +24 -15
  10. data/vendor/scs/README.md +5 -263
  11. data/vendor/scs/include/aa.h +67 -23
  12. data/vendor/scs/include/cones.h +17 -17
  13. data/vendor/scs/include/glbopts.h +98 -32
  14. data/vendor/scs/include/linalg.h +2 -4
  15. data/vendor/scs/include/linsys.h +58 -44
  16. data/vendor/scs/include/normalize.h +3 -3
  17. data/vendor/scs/include/rw.h +8 -2
  18. data/vendor/scs/include/scs.h +293 -133
  19. data/vendor/scs/include/util.h +3 -15
  20. data/vendor/scs/linsys/cpu/direct/private.c +220 -224
  21. data/vendor/scs/linsys/cpu/direct/private.h +13 -7
  22. data/vendor/scs/linsys/cpu/direct/private.o +0 -0
  23. data/vendor/scs/linsys/cpu/indirect/private.c +177 -110
  24. data/vendor/scs/linsys/cpu/indirect/private.h +8 -4
  25. data/vendor/scs/linsys/cpu/indirect/private.o +0 -0
  26. data/vendor/scs/linsys/csparse.c +87 -0
  27. data/vendor/scs/linsys/csparse.h +34 -0
  28. data/vendor/scs/linsys/csparse.o +0 -0
  29. data/vendor/scs/linsys/external/amd/SuiteSparse_config.c +1 -1
  30. data/vendor/scs/linsys/external/amd/SuiteSparse_config.o +0 -0
  31. data/vendor/scs/linsys/external/amd/amd_1.o +0 -0
  32. data/vendor/scs/linsys/external/amd/amd_2.o +0 -0
  33. data/vendor/scs/linsys/external/amd/amd_aat.o +0 -0
  34. data/vendor/scs/linsys/external/amd/amd_control.o +0 -0
  35. data/vendor/scs/linsys/external/amd/amd_defaults.o +0 -0
  36. data/vendor/scs/linsys/external/amd/amd_dump.o +0 -0
  37. data/vendor/scs/linsys/external/amd/amd_global.o +0 -0
  38. data/vendor/scs/linsys/external/amd/amd_info.o +0 -0
  39. data/vendor/scs/linsys/external/amd/amd_internal.h +1 -1
  40. data/vendor/scs/linsys/external/amd/amd_order.o +0 -0
  41. data/vendor/scs/linsys/external/amd/amd_post_tree.o +0 -0
  42. data/vendor/scs/linsys/external/amd/amd_postorder.o +0 -0
  43. data/vendor/scs/linsys/external/amd/amd_preprocess.o +0 -0
  44. data/vendor/scs/linsys/external/amd/amd_valid.o +0 -0
  45. data/vendor/scs/linsys/external/qdldl/changes +2 -0
  46. data/vendor/scs/linsys/external/qdldl/qdldl.c +29 -46
  47. data/vendor/scs/linsys/external/qdldl/qdldl.h +33 -41
  48. data/vendor/scs/linsys/external/qdldl/qdldl.o +0 -0
  49. data/vendor/scs/linsys/external/qdldl/qdldl_types.h +11 -3
  50. data/vendor/scs/linsys/gpu/gpu.c +31 -33
  51. data/vendor/scs/linsys/gpu/gpu.h +48 -31
  52. data/vendor/scs/linsys/gpu/indirect/private.c +338 -232
  53. data/vendor/scs/linsys/gpu/indirect/private.h +23 -14
  54. data/vendor/scs/linsys/scs_matrix.c +498 -0
  55. data/vendor/scs/linsys/scs_matrix.h +70 -0
  56. data/vendor/scs/linsys/scs_matrix.o +0 -0
  57. data/vendor/scs/scs.mk +13 -9
  58. data/vendor/scs/src/aa.c +384 -109
  59. data/vendor/scs/src/aa.o +0 -0
  60. data/vendor/scs/src/cones.c +440 -353
  61. data/vendor/scs/src/cones.o +0 -0
  62. data/vendor/scs/src/ctrlc.c +15 -5
  63. data/vendor/scs/src/ctrlc.o +0 -0
  64. data/vendor/scs/src/linalg.c +84 -28
  65. data/vendor/scs/src/linalg.o +0 -0
  66. data/vendor/scs/src/normalize.c +22 -64
  67. data/vendor/scs/src/normalize.o +0 -0
  68. data/vendor/scs/src/rw.c +160 -21
  69. data/vendor/scs/src/rw.o +0 -0
  70. data/vendor/scs/src/scs.c +767 -563
  71. data/vendor/scs/src/scs.o +0 -0
  72. data/vendor/scs/src/scs_indir.o +0 -0
  73. data/vendor/scs/src/scs_version.c +9 -3
  74. data/vendor/scs/src/scs_version.o +0 -0
  75. data/vendor/scs/src/util.c +37 -106
  76. data/vendor/scs/src/util.o +0 -0
  77. data/vendor/scs/test/minunit.h +17 -8
  78. data/vendor/scs/test/problem_utils.h +176 -14
  79. data/vendor/scs/test/problems/degenerate.h +130 -0
  80. data/vendor/scs/test/problems/hs21_tiny_qp.h +124 -0
  81. data/vendor/scs/test/problems/hs21_tiny_qp_rw.h +116 -0
  82. data/vendor/scs/test/problems/infeasible_tiny_qp.h +100 -0
  83. data/vendor/scs/test/problems/qafiro_tiny_qp.h +199 -0
  84. data/vendor/scs/test/problems/random_prob +0 -0
  85. data/vendor/scs/test/problems/random_prob.h +45 -0
  86. data/vendor/scs/test/problems/rob_gauss_cov_est.h +188 -31
  87. data/vendor/scs/test/problems/small_lp.h +13 -14
  88. data/vendor/scs/test/problems/test_fails.h +43 -0
  89. data/vendor/scs/test/problems/unbounded_tiny_qp.h +82 -0
  90. data/vendor/scs/test/random_socp_prob.c +54 -53
  91. data/vendor/scs/test/rng.h +109 -0
  92. data/vendor/scs/test/run_from_file.c +19 -10
  93. data/vendor/scs/test/run_tests.c +27 -3
  94. metadata +20 -8
  95. data/vendor/scs/linsys/amatrix.c +0 -305
  96. data/vendor/scs/linsys/amatrix.h +0 -36
  97. data/vendor/scs/linsys/amatrix.o +0 -0
  98. data/vendor/scs/test/data/small_random_socp +0 -0
  99. data/vendor/scs/test/problems/small_random_socp.h +0 -33
  100. data/vendor/scs/test/run_tests +0 -2
data/vendor/scs/README.md CHANGED
@@ -1,270 +1,12 @@
  <h1 align="center" margin=0px>
- <img src="https://github.com/cvxgrp/scs/blob/master/docs/scs_logo.png" alt="Intersection of a cone and a polyhedron" width="450">
+ <img src="https://github.com/cvxgrp/scs/blob/master/docs/src/_static/scs_logo.png" alt="Intersection of a cone and a polyhedron" width="450">
  </h1>

- ![Build Status](https://github.com/cvxgrp/scs/actions/workflows/build.yml/badge.svg)
+ [![Build Status](https://github.com/cvxgrp/scs/actions/workflows/build.yml/badge.svg)](https://github.com/cvxgrp/scs/actions/workflows/build.yml)
+ [![Coverage Status](https://coveralls.io/repos/github/cvxgrp/scs/badge.svg?branch=master)](https://coveralls.io/github/cvxgrp/scs?branch=master)


  SCS (`splitting conic solver`) is a numerical optimization package for solving
- large-scale convex cone problems, based on our paper [Conic Optimization via
- Operator Splitting and Homogeneous Self-Dual
- Embedding](http://www.stanford.edu/~boyd/papers/scs.html). It is written in C
- and can be used in other C, C++,
- [Python](https://github.com/bodono/scs-python),
- [Matlab](https://github.com/bodono/scs-matlab),
- [R](https://github.com/bodono/scs-r),
- [Julia](https://github.com/JuliaOpt/SCS.jl), and
- [Ruby](https://github.com/ankane/scs),
- programs via the linked
- interfaces. It can also be called as a solver from convex optimization
- toolboxes [CVX](http://cvxr.com/cvx/) (3.0 or later),
- [CVXPY](https://github.com/cvxgrp/cvxpy),
- [Convex.jl](https://github.com/jump-dev/Convex.jl),
- [JuMP.jl](https://github.com/jump-dev/JuMP.jl), and
- [Yalmip](https://github.com/johanlofberg/YALMIP).
+ large-scale convex cone problems. The current version is `3.0.0`.

- The current version is `2.1.4`. If you wish to cite SCS, please use the
- following:
- ```
- @article{ocpb:16,
- author = {B. O'Donoghue and E. Chu and N. Parikh and S. Boyd},
- title = {Conic Optimization via Operator Splitting and Homogeneous Self-Dual Embedding},
- journal = {Journal of Optimization Theory and Applications},
- month = {June},
- year = {2016},
- volume = {169},
- number = {3},
- pages = {1042-1068},
- url = {http://stanford.edu/~boyd/papers/scs.html},
- }
- @misc{scs,
- author = {B. O'Donoghue and E. Chu and N. Parikh and S. Boyd},
- title = {{SCS}: Splitting Conic Solver, version 2.1.4},
- howpublished = {\url{https://github.com/cvxgrp/scs}},
- month = nov,
- year = 2019
- }
- ```
-
- ----
- SCS numerically solves convex cone programs using the alternating direction
- method of multipliers
- ([ADMM](http://web.stanford.edu/~boyd/papers/admm_distr_stats.html)). It
- returns solutions to both the primal and dual problems if the problem is
- feasible, or a certificate of infeasibility otherwise. It solves the following
- primal cone problem:
-
- ```
- minimize c'x
- subject to Ax + s = b
- s in K
- ```
- over variables `x` and `s`, where `A`, `b` and `c` are user-supplied data and
- `K` is a user-defined convex cone. The dual problem is given by
- ```
- maximize -b'y
- subject to -A'y == c
- y in K^*
- ```
- over variable `y`, where `K^*` denotes the dual cone to `K`.
-
- The cone `K` can be any Cartesian product of the following primitive cones:
- + zero cone `{x | x = 0 }` (dual to the free cone `{x | x in R}`)
- + positive orthant `{x | x >= 0}`
- + second-order cone `{(t,x) | ||x||_2 <= t}`
- + positive semidefinite cone `{ X | min(eig(X)) >= 0, X = X^T }`
- + exponential cone `{(x,y,z) | y e^(x/y) <= z, y>0 }`
- + dual exponential cone `{(u,v,w) | −u e^(v/u) <= e w, u<0}`
- + power cone `{(x,y,z) | x^a * y^(1-a) >= |z|, x>=0, y>=0}`
- + dual power cone `{(u,v,w) | (u/a)^a * (v/(1-a))^(1-a) >= |w|, u>=0, v>=0}`
-
- The rows of the data matrix `A` correspond to the cones in `K`. **The rows of
- `A` must be in the order of the cones given above, i.e., first come the rows
- that correspond to the zero/free cones, then those that correspond to the
- positive orthants, then SOCs, etc.** For a `k` dimensional semidefinite cone
- when interpreting the rows of the data matrix `A` SCS assumes that the `k x k`
- matrix variable has been vectorized by scaling the off-diagonal entries by
- `sqrt(2)` and stacking the **lower triangular elements column-wise** to create a
- vector of length `k(k+1)/2`. See the section on semidefinite programming below.
-
- At termination SCS returns solution `(x*, s*, y*)` if the problem is feasible,
- or a certificate of infeasibility otherwise. See
- [here](http://web.stanford.edu/~boyd/cvxbook/) for more details about
- cone programming and certificates of infeasibility.
-
- **Anderson Acceleration**
-
- By default SCS uses Anderson acceleration (AA) to speed up convergence. The
- number of iterates that SCS uses in the AA calculation can be controlled by the
- parameter `acceleration_lookback` in the settings struct. It defaults to 10. AA
- is available as a standalone package [here](https://github.com/cvxgrp/aa). More
- details are available in our paper on AA
- [here](https://stanford.edu/~boyd/papers/nonexp_global_aa1.html).
-
- **Semidefinite Programming**
-
- SCS assumes that the matrix variables and the input data corresponding to
- semidefinite cones have been vectorized by **scaling the off-diagonal entries by
- `sqrt(2)`** and stacking the lower triangular elements **column-wise**. For a `k
- x k` matrix variable (or data matrix) this operation would create a vector of
- length `k(k+1)/2`. Scaling by `sqrt(2)` is required to preserve the
- inner-product.
-
- **To recover the matrix solution this operation must be inverted on the
- components of the vector returned by SCS corresponding to semidefinite cones**.
- That is, the off-diagonal entries must be scaled by `1/sqrt(2)` and the upper
- triangular entries are filled in by copying the values of lower triangular
- entries.
-
- More explicitly, we want to express
- `Tr(C X)` as `vec(C)'*vec(X)`, where the `vec` operation takes the `k x k` matrix
- ```
- X = [ X11 X12 ... X1k
- X21 X22 ... X2k
- ...
- Xk1 Xk2 ... Xkk ]
- ```
- and produces a vector consisting of
- ```
- vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk).
- ```
-
- **Linear equation solvers**
-
- Each iteration of SCS requires the solution of a set of linear equations. This
- package includes two implementations for solving linear equations: a direct
- solver which uses a cached LDL factorization and an indirect solver based on
- conjugate gradients. The indirect solver can be run on either the cpu or
- gpu.
-
- The direct solver uses external numerical linear algebra packages:
- * [QDLDL](https://github.com/oxfordcontrol/qdldl)
- * [AMD](https://github.com/DrTimothyAldenDavis/SuiteSparse).
-
- ### Using SCS in C
- Typing `make` at the command line will compile the code and create SCS libraries
- in the `out` folder. To run the tests execute:
- ```sh
- make
- make test
- test/run_tests
- ```
-
- If `make` completes successfully, it will produce two static library files,
- `libscsdir.a`, `libscsindir.a`, and two dynamic library files `libscsdir.ext`,
- `libscsindir.ext` (where `.ext` extension is platform dependent) in the same
- folder. It will also produce two demo binaries in the `out` folder named
- `demo_socp_direct`, and `demo_socp_indirect`. If you have a GPU and have CUDA
- installed, you can also execute `make gpu` to compile SCS to run on the GPU
- which will create additional libraries and demo binaries in the `out` folder
- corresponding to the gpu version. Note that the GPU version requires 32 bit
- ints, which can be enforced by compiling with `DLONG=0`.
-
- To use the libraries in your own source code, compile your code with the linker
- option `-L(PATH_TO_SCS_LIBS)` and `-lscsdir` or `-lscsindir` (as needed). The
- API and required data structures are defined in the file `include/scs.h`. The
- four main API functions are:
-
- * `ScsWork * scs_init(const ScsData * d, const ScsCone * k, ScsInfo * info);`
-
- This initializes the ScsWork struct containing the workspace that scs will
- use, and performs the necessary preprocessing (e.g. matrix factorization).
- All inputs `d`, `k`, and `info` must be memory allocated before calling.
-
- * `scs_int scs_solve(ScsWork * w, const ScsData * d, const ScsCone * k, ScsSolution * sol, ScsInfo * info);`
-
- This solves the problem as defined by ScsData `d` and ScsCone `k` using the
- workspace in `w`. The solution is returned in `sol` and information about
- the solve is returned in `info` (outputs must have memory allocated before
- calling). None of the inputs can be NULL. You can call `scs_solve` many
- times for one call to `scs_init`, so long as the matrix `A` does not change
- (vectors `b` and `c` can change).
-
- * `void scs_finish(ScsWork * w);`
-
- Called after all solves completed to free allocated memory and other
- cleanup.
-
- * `scs_int scs(const ScsData * d, const ScsCone * k, ScsSolution * sol, ScsInfo * info);`
-
- Convenience method that simply calls all the above routines in order, for
- cases where the workspace does not need to be reused. All inputs must have
- memory allocated before this call.
-
- The data matrix `A` is specified in column-compressed format and the vectors `b`
- and `c` are specified as dense arrays. The solutions `x` (primal), `s` (slack),
- and `y` (dual) are returned as dense arrays. Cones are specified as the struct
- defined in `include/scs.h`, the rows of `A` must correspond to the cones in the
- exact order as specified by the cone struct (i.e. put linear cones before
- second-order cones etc.).
-
- **Warm-start**
-
- You can warm-start SCS (supply a guess of the solution) by setting `warm_start`
- in the ScsData struct to `1` and supplying the warm-starts in the ScsSolution
- struct (`x`,`y`, and `s`). All inputs must be warm-started if any one is. These
- are used to initialize the iterates in `scs_solve`.
-
- **Re-using matrix factorization**
-
- If using the direct version you can factorize the matrix once and solve many
- times. Simply call `scs_init` once, and use `scs_solve` many times with the same
- workspace, changing the input data `b` and `c` (and optionally warm-starts) for
- each iteration.
-
- **Using your own linear system solver**
-
- To use your own linear system solver simply implement all the methods and the
- two structs in `include/linsys.h` and plug it in.
-
- **BLAS / LAPACK install error**
-
- If you get an error like `cannot find -lblas` or `cannot find -llapack`, then
- you need to install blas and lapack and / or update your environment variables
- to point to the install locations.
-
- ### Using SCS with cmake
-
- Thanks to [`CMake`](http://cmake.org) buildsystem, scs can be easily compiled
- and linked by other `CMake` projects. To use the `cmake` buld system please run
- the following commands:
- ```
- cd scs
- mkdir build
- cd build
- cmake -DCMAKE_INSTALL_PREFIX:PATH=<custom-folder> ../
- make
- make install
- ```
-
- You may also want to compile the tests. In this case when you configure the project,
- please call the following command
- ```
- cmake -DCMAKE_INSTALL_PREFIX:PATH=<custom-folder> -DBUILD_TESTING=ON ../
- make
- ctest
- ```
-
- By default the build-system will compile the library as `shared`. If you want to
- compile it as `static`, please call the following command when you configure the
- project
- ```
- cmake -DCMAKE_INSTALL_PREFIX:PATH=<custom-folder> -BUILD_SHARED_LIBS=OFF ../
- make
- ```
-
- The `cmake` build-system exports two CMake targets called `scs::scsdir` and
- `scs::scsindir` which can be imported using the `find_package` CMake command
- and used by calling `target_link_libraries` as in the following example:
- ```cmake
- cmake_minimum_required(VERSION 3.0)
- project(myproject)
- find_package(scs REQUIRED)
- add_executable(example example.cpp)
-
- # To use the direct method
- target_link_libraries(example scs::scsdir)
-
- # To use the indirect method
- target_link_libraries(example scs::scsindir)
- ```
+ The full documentation is available [here](https://www.cvxgrp.org/scs/).
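As an aside, the semidefinite-cone vectorization spelled out in the README text removed above is easiest to verify on a tiny case. The following worked example is illustrative only (it is not part of the diff); it simply applies the `vec` formula from the old README with `k = 2`:

```
k = 2, X symmetric:   X = [ X11 X21
                            X21 X22 ]
vec(X) = (X11, sqrt(2)*X21, X22)               (length k(k+1)/2 = 3)

Tr(C X) = C11*X11 + 2*C21*X21 + C22*X22
        = C11*X11 + (sqrt(2)*C21)*(sqrt(2)*X21) + C22*X22
        = vec(C)' * vec(X)
```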
data/vendor/scs/include/aa.h CHANGED
@@ -5,50 +5,94 @@
  extern "C" {
  #endif

+ #include "glbopts.h"
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
- #include "glbopts.h"

  typedef scs_float aa_float;
  typedef scs_int aa_int;

  typedef struct ACCEL_WORK AaWork;

- /* Initialize Anderson Acceleration, allocates memory.
+ /**
+ * Initialize Anderson Acceleration, allocates memory.
+ *
+ * @param dim the dimension of the variable for AA
+ * @param mem the memory (number of past iterations used) for AA
+ * @param type1 if True use type 1 AA, otherwise use type 2
+ * @param regularization type-I and type-II different, for type-I: 1e-8 works
+ * well, type-II: more stable can use 1e-12 often
+ * @param relaxation float \in [0,2], mixing parameter (1.0 is vanilla)
+ * @param safeguard_factor factor that controls safeguarding checks
+ * larger is more aggressive but less stable
+ * @param max_weight_norm float, maximum norm of AA weights
+ * @param verbosity if greater than 0 prints out various info
+ *
+ * @return pointer to AA workspace
+ *
+ */
+ AaWork *aa_init(aa_int dim, aa_int mem, aa_int type1, aa_float regularization,
+ aa_float relaxation, aa_float safeguard_factor,
+ aa_float max_weight_norm, aa_int verbosity);
+ /**
+ * Apply Anderson Acceleration. The usage pattern should be as follows:
+ *
+ * - for i = 0 .. N:
+ * - if (i > 0): aa_apply(x, x_prev, a)
+ * - x_prev = x.copy()
+ * - x = F(x)
+ * - aa_safeguard(x, x_prev, a) // optional but helps stability
+ *
+ * Here F is the map we are trying to find the fixed point for. We put the AA
+ * before the map so that any properties of the map are maintained at the end.
+ * Eg if the map contains a projection onto a set then the output is guaranteed
+ * to be in the set.
+ *
  *
- * Args:
- * dim: the dimension of the variable for aa
- * aa_mem: the memory (number of past iterations used) for aa
- * type1: bool, if True use type 1 aa, otherwise use type 2
+ * @param f output of map at current iteration, overwritten with AA output
+ * @param x input to map at current iteration
+ * @param a workspace from aa_init
+ *
+ * @return (+ or -) norm of AA weights vector. If positive then update
+ * was accepted and f contains new point, if negative then update was
+ * rejected and f is unchanged
  *
- * Reurns:
- * Pointer to aa workspace
  */
- AaWork *aa_init(aa_int dim, aa_int aa_mem, aa_int type1);
+ aa_float aa_apply(aa_float *f, const aa_float *x, AaWork *a);

- /* Apply Anderson Acceleration.
+ /**
+ * Apply safeguarding.
+ *
+ * This step is optional but can improve stability.
+ *
+ * @param f_new output of map after AA step
+ * @param x_new AA output that is input to the map
+ * @param a workspace from aa_init
  *
- * Args:
- * f: output of map at current iteration, overwritten with aa output at end.
- * x: input to map at current iteration
- * a: aa workspace from aa_init
+ * @returns 0 if AA step is accepted otherwise -1, if AA step is rejected then
+ * this overwrites f_new and x_new with previous values
  *
- * Returns:
- * int, a value of 0 is success, <0 is failure at which point f is unchanged
  */
- aa_int aa_apply(aa_float *f, const aa_float *x, AaWork *a);
+ aa_int aa_safeguard(aa_float *f_new, aa_float *x_new, AaWork *a);

- /* Finish Anderson Acceleration, clears memory.
+ /**
+ * Finish Anderson Acceleration, clears memory.
+ *
+ * @param a AA workspace from aa_init
  *
- * Args:
- * a: aa workspace from aa_init.
  */
  void aa_finish(AaWork *a);

- #define MAX_AA_NRM (1e4)
-
- #define MIN(a, b) (((a) < (b)) ? (a) : (b))
+ /**
+ * Reset Anderson Acceleration.
+ *
+ * Resets AA as if at the first iteration, reuses original memory allocations.
+ *
+ * @param a AA workspace from aa_init
+ *
+ */
+ void aa_reset(AaWork *a);

  #ifdef __cplusplus
  }
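The usage pattern documented in the new `aa.h` comments above can be turned into a small driver loop. The sketch below is illustrative only: the fixed-point map `F`, the dimension, and the values passed to `aa_init` are made-up choices (the regularization, relaxation, safeguard factor, and max weight norm simply mirror the type-II defaults added to `glbopts.h` in this release).

```c
/* Illustrative driver for the new AA interface declared above; F and all
 * numeric choices here are hypothetical, not taken from the SCS sources. */
#include <string.h>
#include "aa.h"

#define DIM 100

/* hypothetical fixed-point map: x <- (x + 1) / 2, fixed point at all-ones */
static void F(aa_float *x) {
  aa_int i;
  for (i = 0; i < DIM; ++i) {
    x[i] = 0.5 * (x[i] + 1.0);
  }
}

int main(void) {
  aa_float x[DIM] = {0}, x_prev[DIM];
  aa_int i;
  /* type-II AA: regularization 1e-10, relaxation 1.0, safeguard factor 1.0,
   * max weight norm 1e10 (mirroring the new glbopts.h defaults), quiet */
  AaWork *a = aa_init(DIM, 10, 0, 1e-10, 1.0, 1.0, 1e10, 0);
  for (i = 0; i < 1000; ++i) {
    if (i > 0) {
      aa_apply(x, x_prev, a);                  /* AA step before the map */
    }
    memcpy(x_prev, x, DIM * sizeof(aa_float)); /* x_prev = x */
    F(x);                                      /* x = F(x) */
    aa_safeguard(x, x_prev, a);                /* optional safeguarding */
  }
  aa_finish(a);
  return 0;
}
```

Note the AA step is applied before the map, so the final iterate keeps any property enforced by the map itself (e.g. membership in a projected-onto set), as described in the header comment.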
data/vendor/scs/include/cones.h CHANGED
@@ -11,34 +11,34 @@ extern "C" {

  /* private data to help cone projection step */
  struct SCS_CONE_WORK {
- scs_float total_cone_time;
+ /*
+ * cone_boundaries will contain array of indices of rows of A corresponding to
+ * cone boundaries, boundaries[0] is starting index for cones of size larger
+ * than 1
+ */
+ scs_float *s; /* used for Moreau decomposition in projection */
+ scs_int cone_len;
+ /* box cone quantities */
+ scs_float *bl, *bu, box_t_warm_start;
  #ifdef USE_LAPACK
  /* workspace for eigenvector decompositions: */
  scs_float *Xs, *Z, *e, *work;
- blas_int *iwork, lwork, liwork;
+ blas_int lwork;
  #endif
  };

- /*
- * boundaries will contain array of indices of rows of A corresponding to
- * cone boundaries, boundaries[0] is starting index for cones of size larger
- * than 1
- * returns length of boundaries array, boundaries malloc-ed here so should be
- * freed
- */
- scs_int SCS(get_cone_boundaries)(const ScsCone *k, scs_int **boundaries);
-
- ScsConeWork *SCS(init_cone)(const ScsCone *k);
+ ScsConeWork *SCS(init_cone)(const ScsCone *k, const ScsScaling *scal,
+ scs_int cone_len);
  char *SCS(get_cone_header)(const ScsCone *k);
  scs_int SCS(validate_cones)(const ScsData *d, const ScsCone *k);
+ scs_int SCS(set_cone_boundaries)(const ScsCone *k, scs_int **cone_boundaries);

- /* pass in iter to control how accurate the cone projection
- with iteration, set iter < 0 for exact projection, warm_start contains guess
- of solution, can be SCS_NULL*/
  scs_int SCS(proj_dual_cone)(scs_float *x, const ScsCone *k, ScsConeWork *c,
- const scs_float *warm_start, scs_int iter);
+ scs_int normalize);
  void SCS(finish_cone)(ScsConeWork *c);
- char *SCS(get_cone_summary)(const ScsInfo *info, ScsConeWork *c);
+
+ void SCS(set_rho_y_vec)(const ScsCone *k, scs_float scale, scs_float *rho_y_vec,
+ scs_int m);

  #ifdef __cplusplus
  }
data/vendor/scs/include/glbopts.h CHANGED
@@ -12,8 +12,8 @@ extern "C" {
  #endif

  /* SCS VERSION NUMBER ---------------------------------------------- */
- #define SCS_VERSION \
- ("2.1.4") /* string literals automatically null-terminated */
+ #define SCS_VERSION \
+ ("3.0.0") /* string literals automatically null-terminated */

  /* SCS returns one of the following integers: */
  #define SCS_INFEASIBLE_INACCURATE (-7)
@@ -27,18 +27,28 @@ extern "C" {
  #define SCS_SOLVED (1)
  #define SCS_SOLVED_INACCURATE (2)

+ /* verbosity level */
+ #ifndef VERBOSITY
+ #define VERBOSITY (0)
+ #endif
+
  /* DEFAULT SOLVER PARAMETERS AND SETTINGS -------------------------- */
- #define MAX_ITERS (5000)
- #define EPS (1E-5)
+ #define MAX_ITERS (100000)
+ #define EPS_REL (1E-4)
+ #define EPS_ABS (1E-4)
+ #define EPS_INFEAS (1E-7)
  #define ALPHA (1.5)
- #define RHO_X (1E-3)
- #define SCALE (1.0)
- #define CG_RATE (2.0)
+ #define RHO_X (1E-6)
+ #define SCALE (0.1)
  #define VERBOSE (1)
  #define NORMALIZE (1)
  #define WARM_START (0)
  #define ACCELERATION_LOOKBACK (10)
+ #define ACCELERATION_INTERVAL (10)
+ #define ADAPTIVE_SCALE (1)
  #define WRITE_DATA_FILENAME (0)
+ #define LOG_CSV_FILENAME (0)
+ #define TIME_LIMIT_SECS (0.)

  /* redefine printfs and memory allocators as needed */
  #ifdef MATLAB_MEX_FILE
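These compile-time defaults correspond to fields of the solver settings struct that callers fill in at run time. The sketch below is a hedged illustration: it assumes the `ScsSettings` struct and the `scs_set_default_settings()` helper from the 3.x `include/scs.h`, which are not shown in this excerpt.

```c
/* Hedged sketch: start from the 3.x defaults listed above, then override a
 * few fields. Assumes ScsSettings and scs_set_default_settings() from the
 * 3.x include/scs.h (not part of this diff excerpt). */
#include "scs.h"

static void configure_settings(ScsSettings *stgs) {
  scs_set_default_settings(stgs);   /* max_iters = 100000, eps_abs = eps_rel = 1e-4, ... */
  stgs->eps_abs = 1e-6;             /* tighten absolute tolerance */
  stgs->eps_rel = 1e-6;             /* tighten relative tolerance */
  stgs->time_limit_secs = 10.0;     /* new in 3.x; 0. (the default) means no limit */
  stgs->acceleration_lookback = 10; /* Anderson acceleration memory */
}
```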
@@ -50,19 +60,30 @@ extern "C" {
  #define _scs_realloc mxRealloc
  #elif defined PYTHON
  #include <Python.h>
- #include <stdlib.h>
- #define scs_printf(...) \
- { \
- PyGILState_STATE gilstate = PyGILState_Ensure(); \
- PySys_WriteStdout(__VA_ARGS__); \
- PyGILState_Release(gilstate); \
+ #define scs_printf(...) \
+ { \
+ PyGILState_STATE gilstate = PyGILState_Ensure(); \
+ PySys_WriteStdout(__VA_ARGS__); \
+ PyGILState_Release(gilstate); \
  }
- #define _scs_printf printf
- #define _scs_free free
- #define _scs_malloc malloc
- #define _scs_calloc calloc
- #define _scs_realloc realloc
- #elif (defined(USING_R))
+ /* only for SuiteSparse */
+ #define _scs_printf PySys_WriteStdout
+ #if PY_MAJOR_VERSION >= 3
+ #define _scs_free PyMem_RawFree
+ #define _scs_malloc PyMem_RawMalloc
+ #define _scs_realloc PyMem_RawRealloc
+ #define _scs_calloc PyMem_RawCalloc
+ #else
+ #define _scs_free PyMem_Free
+ #define _scs_malloc PyMem_Malloc
+ #define _scs_realloc PyMem_Realloc
+ static inline void *_scs_calloc(size_t count, size_t size) {
+ void *obj = PyMem_Malloc(count * size);
+ memset(obj, 0, count * size);
+ return obj;
+ }
+ #endif
+ #elif defined R_LANG
  #include <R_ext/Print.h> /* Rprintf etc */
  #include <stdio.h>
  #include <stdlib.h>
@@ -86,21 +107,22 @@ extern "C" {
  #define _scs_printf scs_printf
  #endif

- #define scs_free(x) \
- _scs_free(x); \
+ #define scs_free(x) \
+ _scs_free(x); \
  x = SCS_NULL
  #define scs_malloc(x) _scs_malloc(x)
  #define scs_calloc(x, y) _scs_calloc(x, y)
  #define scs_realloc(x, y) _scs_realloc(x, y)

  #ifdef DLONG
- #ifdef _WIN64
+ /*#ifdef _WIN64
  #include <stdint.h>
  typedef int64_t scs_int;
- /* typedef long scs_int; */
  #else
  typedef long scs_int;
  #endif
+ */
+ typedef long long scs_int;
  #else
  typedef int scs_int;
  #endif
@@ -153,23 +175,67 @@ typedef float scs_float;
  #endif
  #endif

+ /* Force SCS to treat the problem as (non-homogeneous) feasible for this many */
+ /* iters. This acts like a warm-start that biases towards feasibility, which */
+ /* is the most common use-case */
+ #define FEASIBLE_ITERS (1)
+
+ /* how many iterations between heuristic residual rescaling */
+ #define RESCALING_MIN_ITERS (100)
+
  #define EPS_TOL (1E-18)
  #define SAFEDIV_POS(X, Y) ((Y) < EPS_TOL ? ((X) / EPS_TOL) : (X) / (Y))

- #if EXTRA_VERBOSE > 0
+ #if VERBOSITY > 0
  #define PRINT_INTERVAL (1)
  #define CONVERGED_INTERVAL (1)
  #else
+
  /* print summary output every this num iterations */
- #define PRINT_INTERVAL (100)
+ #define PRINT_INTERVAL (250)
  /* check for convergence every this num iterations */
- #define CONVERGED_INTERVAL (20)
- #endif
-
- /* tolerance at which we declare problem indeterminate */
- #define INDETERMINATE_TOL (1e-9)
- /* maintain the iterates at this l2 norm (due to homogeneity) */
- #define ITERATE_NORM (10.)
+ #define CONVERGED_INTERVAL (25)
+ #endif
+
+ /* maintain the iterates at L2 norm = ITERATE_NORM * sqrt(n+m+1) */
+ #define ITERATE_NORM (1.)
+
+ /* Which norm to use for termination checking etc */
+ /* #define NORM SCS(norm_2) */
+ #define NORM SCS(norm_inf)
+
+ /* Factor which is scales tau in the linear system update */
+ /* Larger factors prevent tau from moving as much */
+ #define TAU_FACTOR (10.)
+
+ /* Anderson acceleration parameters: */
+ #define AA_RELAXATION (1.0)
+ #define AA_REGULARIZATION_TYPE_1 (1e-6)
+ #define AA_REGULARIZATION_TYPE_2 (1e-10)
+ /* Safeguarding norm factor at which we reject AA steps */
+ #define AA_SAFEGUARD_FACTOR (1.)
+ /* Max allowable AA weight norm */
+ #define AA_MAX_WEIGHT_NORM (1e10)
+
+ /* (Dual) Scale updating parameters */
+ #define MAX_SCALE_VALUE (1e6)
+ #define MIN_SCALE_VALUE (1e-6)
+ #define SCALE_NORM NORM /* what norm to use when computing the scale factor */
+
+ /* CG == Conjugate gradient */
+ /* Linear system tolerances, only used with indirect */
+ #define CG_BEST_TOL (1e-12)
+ /* This scales the current residuals to get the tolerance we solve the
+ * linear system to at each iteration. Lower factors require more CG steps
+ * but give better accuracy */
+ #define CG_TOL_FACTOR (0.2)
+
+ /* norm to use when deciding CG convergence */
+ #ifndef CG_NORM
+ #define CG_NORM SCS(norm_inf)
+ #endif
+ /* cg tol ~ O(1/k^(CG_RATE)) */
+ #define CG_RATE (1.5)

  #ifdef __cplusplus
  }
data/vendor/scs/include/linalg.h CHANGED
@@ -5,15 +5,13 @@
  extern "C" {
  #endif

- #include <math.h>
  #include "scs.h"
+ #include <math.h>

- void SCS(set_as_scaled_array)(scs_float *x, const scs_float *a,
- const scs_float b, scs_int len);
  void SCS(scale_array)(scs_float *a, const scs_float b, scs_int len);
  scs_float SCS(dot)(const scs_float *x, const scs_float *y, scs_int len);
  scs_float SCS(norm_sq)(const scs_float *v, scs_int len);
- scs_float SCS(norm)(const scs_float *v, scs_int len);
+ scs_float SCS(norm_2)(const scs_float *v, scs_int len);
  scs_float SCS(norm_inf)(const scs_float *a, scs_int l);
  void SCS(add_scaled_array)(scs_float *a, const scs_float *b, scs_int n,
  const scs_float sc);