scs 0.2.1 → 0.3.1

Files changed (98)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +16 -0
  3. data/LICENSE.txt +18 -18
  4. data/README.md +12 -7
  5. data/lib/scs/ffi.rb +30 -13
  6. data/lib/scs/solver.rb +32 -14
  7. data/lib/scs/version.rb +1 -1
  8. data/vendor/scs/CITATION.cff +39 -0
  9. data/vendor/scs/CMakeLists.txt +272 -0
  10. data/vendor/scs/Makefile +24 -15
  11. data/vendor/scs/README.md +8 -216
  12. data/vendor/scs/include/aa.h +67 -23
  13. data/vendor/scs/include/cones.h +17 -17
  14. data/vendor/scs/include/glbopts.h +98 -32
  15. data/vendor/scs/include/linalg.h +2 -4
  16. data/vendor/scs/include/linsys.h +58 -44
  17. data/vendor/scs/include/normalize.h +3 -3
  18. data/vendor/scs/include/rw.h +8 -2
  19. data/vendor/scs/include/scs.h +293 -133
  20. data/vendor/scs/include/util.h +3 -15
  21. data/vendor/scs/linsys/cpu/direct/private.c +220 -224
  22. data/vendor/scs/linsys/cpu/direct/private.h +13 -7
  23. data/vendor/scs/linsys/cpu/indirect/private.c +177 -110
  24. data/vendor/scs/linsys/cpu/indirect/private.h +8 -4
  25. data/vendor/scs/linsys/csparse.c +87 -0
  26. data/vendor/scs/linsys/csparse.h +34 -0
  27. data/vendor/scs/linsys/external/amd/SuiteSparse_config.c +1 -1
  28. data/vendor/scs/linsys/external/amd/amd_internal.h +1 -1
  29. data/vendor/scs/linsys/external/qdldl/changes +2 -0
  30. data/vendor/scs/linsys/external/qdldl/qdldl.c +29 -46
  31. data/vendor/scs/linsys/external/qdldl/qdldl.h +33 -41
  32. data/vendor/scs/linsys/external/qdldl/qdldl_types.h +11 -3
  33. data/vendor/scs/linsys/gpu/gpu.c +58 -21
  34. data/vendor/scs/linsys/gpu/gpu.h +66 -28
  35. data/vendor/scs/linsys/gpu/indirect/private.c +368 -154
  36. data/vendor/scs/linsys/gpu/indirect/private.h +26 -12
  37. data/vendor/scs/linsys/scs_matrix.c +498 -0
  38. data/vendor/scs/linsys/scs_matrix.h +70 -0
  39. data/vendor/scs/scs.mk +13 -9
  40. data/vendor/scs/src/aa.c +384 -109
  41. data/vendor/scs/src/cones.c +440 -353
  42. data/vendor/scs/src/ctrlc.c +15 -5
  43. data/vendor/scs/src/linalg.c +84 -28
  44. data/vendor/scs/src/normalize.c +22 -64
  45. data/vendor/scs/src/rw.c +161 -22
  46. data/vendor/scs/src/scs.c +768 -561
  47. data/vendor/scs/src/scs_version.c +9 -3
  48. data/vendor/scs/src/util.c +37 -106
  49. data/vendor/scs/test/minunit.h +17 -8
  50. data/vendor/scs/test/problem_utils.h +176 -14
  51. data/vendor/scs/test/problems/degenerate.h +130 -0
  52. data/vendor/scs/test/problems/hs21_tiny_qp.h +124 -0
  53. data/vendor/scs/test/problems/hs21_tiny_qp_rw.h +116 -0
  54. data/vendor/scs/test/problems/infeasible_tiny_qp.h +100 -0
  55. data/vendor/scs/test/problems/qafiro_tiny_qp.h +199 -0
  56. data/vendor/scs/test/problems/random_prob +0 -0
  57. data/vendor/scs/test/problems/random_prob.h +45 -0
  58. data/vendor/scs/test/problems/rob_gauss_cov_est.h +188 -31
  59. data/vendor/scs/test/problems/small_lp.h +13 -14
  60. data/vendor/scs/test/problems/test_fails.h +43 -0
  61. data/vendor/scs/test/problems/unbounded_tiny_qp.h +82 -0
  62. data/vendor/scs/test/random_socp_prob.c +54 -53
  63. data/vendor/scs/test/rng.h +109 -0
  64. data/vendor/scs/test/run_from_file.c +19 -10
  65. data/vendor/scs/test/run_tests.c +27 -3
  66. metadata +25 -97
  67. data/vendor/scs/linsys/amatrix.c +0 -305
  68. data/vendor/scs/linsys/amatrix.h +0 -36
  69. data/vendor/scs/linsys/amatrix.o +0 -0
  70. data/vendor/scs/linsys/cpu/direct/private.o +0 -0
  71. data/vendor/scs/linsys/cpu/indirect/private.o +0 -0
  72. data/vendor/scs/linsys/external/amd/SuiteSparse_config.o +0 -0
  73. data/vendor/scs/linsys/external/amd/amd_1.o +0 -0
  74. data/vendor/scs/linsys/external/amd/amd_2.o +0 -0
  75. data/vendor/scs/linsys/external/amd/amd_aat.o +0 -0
  76. data/vendor/scs/linsys/external/amd/amd_control.o +0 -0
  77. data/vendor/scs/linsys/external/amd/amd_defaults.o +0 -0
  78. data/vendor/scs/linsys/external/amd/amd_dump.o +0 -0
  79. data/vendor/scs/linsys/external/amd/amd_global.o +0 -0
  80. data/vendor/scs/linsys/external/amd/amd_info.o +0 -0
  81. data/vendor/scs/linsys/external/amd/amd_order.o +0 -0
  82. data/vendor/scs/linsys/external/amd/amd_post_tree.o +0 -0
  83. data/vendor/scs/linsys/external/amd/amd_postorder.o +0 -0
  84. data/vendor/scs/linsys/external/amd/amd_preprocess.o +0 -0
  85. data/vendor/scs/linsys/external/amd/amd_valid.o +0 -0
  86. data/vendor/scs/linsys/external/qdldl/qdldl.o +0 -0
  87. data/vendor/scs/src/aa.o +0 -0
  88. data/vendor/scs/src/cones.o +0 -0
  89. data/vendor/scs/src/ctrlc.o +0 -0
  90. data/vendor/scs/src/linalg.o +0 -0
  91. data/vendor/scs/src/normalize.o +0 -0
  92. data/vendor/scs/src/rw.o +0 -0
  93. data/vendor/scs/src/scs.o +0 -0
  94. data/vendor/scs/src/scs_version.o +0 -0
  95. data/vendor/scs/src/util.o +0 -0
  96. data/vendor/scs/test/data/small_random_socp +0 -0
  97. data/vendor/scs/test/problems/small_random_socp.h +0 -33
  98. data/vendor/scs/test/run_tests +0 -2
data/vendor/scs/Makefile CHANGED
@@ -1,7 +1,9 @@
  # MAKEFILE for scs
  include scs.mk

- SCS_OBJECTS = src/scs.o src/util.o src/cones.o src/aa.o src/rw.o src/linalg.o src/ctrlc.o src/scs_version.o src/normalize.o
+ SCS_OBJECTS = src/util.o src/cones.o src/aa.o src/rw.o src/linalg.o src/ctrlc.o src/scs_version.o src/normalize.o
+ SCS_O = src/scs.o
+ SCS_INDIR_O = src/scs_indir.o

  SRC_FILES = $(wildcard src/*.c)
  INC_FILES = $(wildcard include/*.h)
@@ -17,7 +19,7 @@ TARGETS = $(OUT)/demo_socp_indirect $(OUT)/demo_socp_direct $(OUT)/run_from_file
  default: $(TARGETS) $(OUT)/libscsdir.a $(OUT)/libscsindir.a $(OUT)/libscsdir.$(SHARED) $(OUT)/libscsindir.$(SHARED)
  @echo "****************************************************************************************"
  @echo "Successfully compiled scs, copyright Brendan O'Donoghue 2012."
- @echo "To test, type '$(OUT)/demo_socp_indirect' to solve a random SOCP."
+ @echo "To test, type '$(OUT)/demo_socp_direct' to solve a random SOCP."
  @echo "**********************************************************************************"
  ifneq ($(USE_LAPACK), 0)
  @echo "Compiled with blas and lapack, can solve LPs, SOCPs, SDPs, ECPs, and PCPs"
@@ -28,38 +30,43 @@ else
  endif
  @echo "****************************************************************************************"

+ $(SCS_O): src/scs.c $(INC_FILES)
+ $(CC) $(CFLAGS) -c $< -o $@
+
+ $(SCS_INDIR_O): src/scs.c $(INC_FILES)
+ $(CC) $(CFLAGS) -DINDIRECT=1 -c $< -o $@
+
  %.o : src/%.c
  $(CC) $(CFLAGS) -c $< -o $@

- src/scs.o : $(SRC_FILES) $(INC_FILES)
  src/util.o : src/util.c include/util.h include/glbopts.h
  src/cones.o : src/cones.c include/cones.h include/scs_blas.h
  src/aa.o : src/aa.c include/aa.h include/scs_blas.h
  src/rw.o : src/rw.c include/rw.h
- src/cs.o : src/cs.c include/cs.h
  src/linalg.o: src/linalg.c include/linalg.h
  src/ctrl.o : src/ctrl.c include/ctrl.h
  src/scs_version.o: src/scs_version.c include/glbopts.h

  $(DIRSRC)/private.o: $(DIRSRC)/private.c $(DIRSRC)/private.h
  $(INDIRSRC)/indirect/private.o: $(INDIRSRC)/private.c $(INDIRSRC)/private.h
- $(LINSYS)/amatrix.o: $(LINSYS)/amatrix.c $(LINSYS)/amatrix.h
+ $(LINSYS)/scs_matrix.o: $(LINSYS)/scs_matrix.c $(LINSYS)/scs_matrix.h
+ $(LINSYS)/csparse.o: $(LINSYS)/csparse.c $(LINSYS)/csparse.h

- $(OUT)/libscsdir.a: $(SCS_OBJECTS) $(DIRSRC)/private.o $(AMD_OBJS) $(LDL_OBJS) $(LINSYS)/amatrix.o
+ $(OUT)/libscsdir.a: $(SCS_O) $(SCS_OBJECTS) $(DIRSRC)/private.o $(AMD_OBJS) $(LDL_OBJS) $(LINSYS)/scs_matrix.o $(LINSYS)/csparse.o
  mkdir -p $(OUT)
  $(ARCHIVE) $@ $^
  - $(RANLIB) $@

- $(OUT)/libscsindir.a: $(SCS_OBJECTS) $(INDIRSRC)/private.o $(LINSYS)/amatrix.o
+ $(OUT)/libscsindir.a: $(SCS_INDIR_O) $(SCS_OBJECTS) $(INDIRSRC)/private.o $(LINSYS)/scs_matrix.o $(LINSYS)/csparse.o
  mkdir -p $(OUT)
  $(ARCHIVE) $@ $^
  - $(RANLIB) $@

- $(OUT)/libscsdir.$(SHARED): $(SCS_OBJECTS) $(DIRSRC)/private.o $(AMD_OBJS) $(LDL_OBJS) $(LINSYS)/amatrix.o
+ $(OUT)/libscsdir.$(SHARED): $(SCS_O) $(SCS_OBJECTS) $(DIRSRC)/private.o $(AMD_OBJS) $(LDL_OBJS) $(LINSYS)/scs_matrix.o $(LINSYS)/csparse.o
  mkdir -p $(OUT)
  $(CC) $(CFLAGS) -shared -Wl,$(SONAME),$(@:$(OUT)/%=%) -o $@ $^ $(LDFLAGS)

- $(OUT)/libscsindir.$(SHARED): $(SCS_OBJECTS) $(INDIRSRC)/private.o $(LINSYS)/amatrix.o
+ $(OUT)/libscsindir.$(SHARED): $(SCS_INDIR_O) $(SCS_OBJECTS) $(INDIRSRC)/private.o $(LINSYS)/scs_matrix.o $(LINSYS)/csparse.o
  mkdir -p $(OUT)
  $(CC) $(CFLAGS) -shared -Wl,$(SONAME),$(@:$(OUT)/%=%) -o $@ $^ $(LDFLAGS)

@@ -110,20 +117,20 @@ $(LINSYS)/gpu/gpu.o: $(LINSYS)/gpu/gpu.c
  $(GPUINDIR)/private.o: $(GPUINDIR)/private.c
  $(CUCC) -c -o $(GPUINDIR)/private.o $^ $(CUDAFLAGS)

- # $(OUT)/libscsgpudir.$(SHARED): $(SCS_OBJECTS) $(GPUDIR)/private.o $(AMD_OBJS) $(LINSYS)/amatrix.o $(LINSYS)/gpu/gpu.o
+ # $(OUT)/libscsgpudir.$(SHARED): $(SCS_O) $(SCS_OBJECTS) $(GPUDIR)/private.o $(AMD_OBJS) $(LINSYS)/scs_matrix.o $(LINSYS)/gpu/gpu.o
  # mkdir -p $(OUT)
  # $(CC) $(CFLAGS) -shared -Wl,$(SONAME),$(@:$(OUT)/%=%) -o $@ $^ $(LDFLAGS) $(CULDFLAGS)

- # $(OUT)/libscsgpudir.a: $(SCS_OBJECTS) $(GPUDIR)/private.o $(AMD_OBJS) $(LINSYS)/amatrix.o $(LINSYS)/gpu/gpu.o
+ # $(OUT)/libscsgpudir.a: $(SCS_INDIR_O) $(SCS_OBJECTS) $(GPUDIR)/private.o $(AMD_OBJS) $(LINSYS)/scs_matrix.o $(LINSYS)/gpu/gpu.o
  # mkdir -p $(OUT)
  # $(ARCHIVE) $@ $^
  # - $(RANLIB) $@

- $(OUT)/libscsgpuindir.$(SHARED): $(SCS_OBJECTS) $(GPUINDIR)/private.o $(LINSYS)/amatrix.o $(LINSYS)/gpu/gpu.o
+ $(OUT)/libscsgpuindir.$(SHARED): $(SCS_O) $(SCS_OBJECTS) $(GPUINDIR)/private.o $(LINSYS)/scs_matrix.o $(LINSYS)/csparse.o $(LINSYS)/gpu/gpu.o
  mkdir -p $(OUT)
  $(CC) $(CFLAGS) -shared -Wl,$(SONAME),$(@:$(OUT)/%=%) -o $@ $^ $(LDFLAGS) $(CULDFLAGS)

- $(OUT)/libscsgpuindir.a: $(SCS_OBJECTS) $(GPUINDIR)/private.o $(LINSYS)/amatrix.o $(LINSYS)/gpu/gpu.o
+ $(OUT)/libscsgpuindir.a: $(SCS_INDIR_O) $(SCS_OBJECTS) $(GPUINDIR)/private.o $(LINSYS)/scs_matrix.o $(LINSYS)/csparse.o $(LINSYS)/gpu/gpu.o
  mkdir -p $(OUT)
  $(ARCHIVE) $@ $^
  - $(RANLIB) $@
@@ -136,7 +143,7 @@ $(OUT)/demo_socp_gpu_indirect: test/random_socp_prob.c $(OUT)/libscsgpuindir.a

  .PHONY: clean purge
  clean:
- @rm -rf $(TARGETS) $(SCS_OBJECTS) $(AMD_OBJS) $(LDL_OBJS) $(LINSYS)/*.o $(DIRSRC)/*.o $(INDIRSRC)/*.o $(GPUDIR)/*.o $(GPUINDIR)/*.o
+ @rm -rf $(TARGETS) $(SCS_O) $(SCS_INDIR_O) $(SCS_OBJECTS) $(AMD_OBJS) $(LDL_OBJS) $(LINSYS)/*.o $(DIRSRC)/*.o $(INDIRSRC)/*.o $(GPUDIR)/*.o $(GPUINDIR)/*.o
  @rm -rf $(OUT)/*.dSYM
  @rm -rf matlab/*.mex*
  @rm -rf .idea
@@ -153,7 +160,7 @@ INSTALL_GPU_TARGETS = $(OUT)/libscsgpuindir.a $(OUT)/libscsgpuindir.$(SHARED) #
  INSTALL_INC_DIR = $(DESTDIR)$(PREFIX)/include/scs/
  INSTALL_LIB_DIR = $(DESTDIR)$(PREFIX)/lib/

- .PHONY: install install_gpu
+ .PHONY: install install_gpu direct indirect
  install: $(INSTALL_INC_FILES) $(INSTALL_TARGETS)
  $(INSTALL) -d $(INSTALL_INC_DIR) $(INSTALL_LIB_DIR)
  $(INSTALL) -m 644 $(INSTALL_INC_FILES) $(INSTALL_INC_DIR)
@@ -162,3 +169,5 @@ install_gpu: $(INSTALL_INC_FILES) $(INSTALL_GPU_TARGETS)
  $(INSTALL) -d $(INSTALL_INC_DIR) $(INSTALL_LIB_DIR)
  $(INSTALL) -m 644 $(INSTALL_INC_FILES) $(INSTALL_INC_DIR)
  $(INSTALL) -m 644 $(INSTALL_GPU_TARGETS) $(INSTALL_LIB_DIR)
+ direct:$(OUT)/libscsdir.$(SHARED) $(OUT)/demo_socp_direct $(OUT)/run_from_file_direct $(OUT)/run_tests_direct
+ indirect:$(OUT)/libscsindir.$(SHARED) $(OUT)/demo_socp_indirect $(OUT)/run_from_file_indirect $(OUT)/run_tests_indirect
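The hunks above split `src/scs.c` into two objects, `SCS_O` and `SCS_INDIR_O`, the latter built with `-DINDIRECT=1` so that the direct (cached LDL factorization) and indirect (conjugate gradient) libraries each get their own copy. A minimal sketch of what such a compile-time switch looks like in practice; the function and strings below are illustrative only and are not taken from `scs.c`:

```c
/* Illustrative only: the same translation unit compiled twice, once with
 * -DINDIRECT=1 (the SCS_INDIR_O object) and once without (SCS_O).
 * The real scs.c wires up its linear-system backend through linsys/, not
 * through this toy function. */
#include <stdio.h>

static const char *linsys_backend(void) {
#ifdef INDIRECT
  return "indirect (conjugate gradient)";     /* goes into libscsindir */
#else
  return "direct (cached LDL factorization)"; /* goes into libscsdir */
#endif
}

int main(void) {
  printf("linear system solver: %s\n", linsys_backend());
  return 0;
}
```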
data/vendor/scs/README.md CHANGED
@@ -1,220 +1,12 @@
- SCS
- ====
+ <h1 align="center" margin=0px>
+ <img src="https://github.com/cvxgrp/scs/blob/master/docs/src/_static/scs_logo.png" alt="Intersection of a cone and a polyhedron" width="450">
+ </h1>

- [![Build Status](https://travis-ci.org/cvxgrp/scs.svg?branch=master)](https://travis-ci.org/cvxgrp/scs)
- [![Build status](https://ci.appveyor.com/api/projects/status/4542u6kom5293qpm/branch/master?svg=true)](https://ci.appveyor.com/project/bodono/scs/branch/master)
+ [![Build Status](https://github.com/cvxgrp/scs/actions/workflows/build.yml/badge.svg)](https://github.com/cvxgrp/scs/actions/workflows/build.yml)
+ [![Coverage Status](https://coveralls.io/repos/github/cvxgrp/scs/badge.svg?branch=master)](https://coveralls.io/github/cvxgrp/scs?branch=master)

- SCS (`splitting conic solver`) is a numerical optimization package for solving
- large-scale convex cone problems, based on our paper [Conic Optimization via
- Operator Splitting and Homogeneous Self-Dual
- Embedding](http://www.stanford.edu/~boyd/papers/scs.html). It is written in C
- and can be used in other C, C++,
- [Python](https://github.com/bodono/scs-python),
- [Matlab](https://github.com/bodono/scs-matlab),
- [R](https://github.com/bodono/scs-r),
- [Julia](https://github.com/JuliaOpt/SCS.jl), programs via the linked
- interfaces. It can also be called as a solver from convex optimization
- toolboxes [CVX](http://cvxr.com/cvx/) (3.0 or later),
- [CVXPY](https://github.com/cvxgrp/cvxpy),
- [Convex.jl](https://github.com/JuliaOpt/Convex.jl), and
- [Yalmip](https://github.com/johanlofberg/YALMIP).
-
- The current version is `2.1.2`. If you wish to cite SCS, please use the
- following:
- ```
- @article{ocpb:16,
- author = {B. O'Donoghue and E. Chu and N. Parikh and S. Boyd},
- title = {Conic Optimization via Operator Splitting and Homogeneous Self-Dual Embedding},
- journal = {Journal of Optimization Theory and Applications},
- month = {June},
- year = {2016},
- volume = {169},
- number = {3},
- pages = {1042-1068},
- url = {http://stanford.edu/~boyd/papers/scs.html},
- }
- @misc{scs,
- author = {B. O'Donoghue and E. Chu and N. Parikh and S. Boyd},
- title = {{SCS}: Splitting Conic Solver, version 2.1.2},
- howpublished = {\url{https://github.com/cvxgrp/scs}},
- month = nov,
- year = 2019
- }
- ```
-
- ----
- SCS numerically solves convex cone programs using the alternating direction
- method of multipliers
- ([ADMM](http://web.stanford.edu/~boyd/papers/admm_distr_stats.html)). It
- returns solutions to both the primal and dual problems if the problem is
- feasible, or a certificate of infeasibility otherwise. It solves the following
- primal cone problem:
-
- ```
- minimize c'x
- subject to Ax + s = b
- s in K
- ```
- over variables `x` and `s`, where `A`, `b` and `c` are user-supplied data and
- `K` is a user-defined convex cone. The dual problem is given by
- ```
- maximize -b'y
- subject to -A'y == c
- y in K^*
- ```
- over variable `y`, where `K^*` denotes the dual cone to `K`.
-
- The cone `K` can be any Cartesian product of the following primitive cones:
- + zero cone `{x | x = 0 }` (dual to the free cone `{x | x in R}`)
- + positive orthant `{x | x >= 0}`
- + second-order cone `{(t,x) | ||x||_2 <= t}`
- + positive semidefinite cone `{ X | min(eig(X)) >= 0, X = X^T }`
- + exponential cone `{(x,y,z) | y e^(x/y) <= z, y>0 }`
- + dual exponential cone `{(u,v,w) | −u e^(v/u) <= e w, u<0}`
- + power cone `{(x,y,z) | x^a * y^(1-a) >= |z|, x>=0, y>=0}`
- + dual power cone `{(u,v,w) | (u/a)^a * (v/(1-a))^(1-a) >= |w|, u>=0, v>=0}`
-
- The rows of the data matrix `A` correspond to the cones in `K`. **The rows of
- `A` must be in the order of the cones given above, i.e., first come the rows
- that correspond to the zero/free cones, then those that correspond to the
- positive orthants, then SOCs, etc.** For a `k` dimensional semidefinite cone
- when interpreting the rows of the data matrix `A` SCS assumes that the `k x k`
- matrix variable has been vectorized by scaling the off-diagonal entries by
- `sqrt(2)` and stacking the **lower triangular elements column-wise** to create a
- vector of length `k(k+1)/2`. See the section on semidefinite programming below.
-
- At termination SCS returns solution `(x*, s*, y*)` if the problem is feasible,
- or a certificate of infeasibility otherwise. See
- [here](http://web.stanford.edu/~boyd/cvxbook/) for more details about
- cone programming and certificates of infeasibility.
-
- **Anderson Acceleration**
-
- By default SCS uses Anderson acceleration (AA) to speed up convergence. The
- number of iterates that SCS uses in the AA calculation can be controlled by the
- parameter `acceleration_lookback` in the settings struct. It defaults to 10. AA
- is available as a standalone package [here](https://github.com/cvxgrp/aa). More
- details are available in our paper on AA
- [here](https://stanford.edu/~boyd/papers/nonexp_global_aa1.html).
-
- **Semidefinite Programming**
-
- SCS assumes that the matrix variables and the input data corresponding to
- semidefinite cones have been vectorized by **scaling the off-diagonal entries by
- `sqrt(2)`** and stacking the lower triangular elements **column-wise**. For a `k
- x k` matrix variable (or data matrix) this operation would create a vector of
- length `k(k+1)/2`. Scaling by `sqrt(2)` is required to preserve the
- inner-product.
-
- **To recover the matrix solution this operation must be inverted on the
- components of the vector returned by SCS corresponding to semidefinite cones**.
- That is, the off-diagonal entries must be scaled by `1/sqrt(2)` and the upper
- triangular entries are filled in by copying the values of lower triangular
- entries.
-
- More explicitly, we want to express
- `Tr(C X)` as `vec(C)'*vec(X)`, where the `vec` operation takes the `k x k` matrix
- ```
- X = [ X11 X12 ... X1k
- X21 X22 ... X2k
- ...
- Xk1 Xk2 ... Xkk ]
- ```
- and produces a vector consisting of
- ```
- vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk).
- ```
-
- **Linear equation solvers**
-
- Each iteration of SCS requires the solution of a set of linear equations. This
- package includes two implementations for solving linear equations: a direct
- solver which uses a cached LDL factorization and an indirect solver based on
- conjugate gradients. The indirect solver can be run on either the cpu or
- gpu.
-
- The direct solver uses external numerical linear algebra packages:
- * [QDLDL](https://github.com/oxfordcontrol/qdldl)
- * [AMD](http://www.cise.ufl.edu/research/sparse/).
-
- ### Using SCS in C
- Typing `make` at the command line will compile the code and create SCS libraries
- in the `out` folder. To run the tests execute:
- ```sh
- make
- make test
- test/run_tests
- ```
-
- If `make` completes successfully, it will produce two static library files,
- `libscsdir.a`, `libscsindir.a`, and two dynamic library files `libscsdir.ext`,
- `libscsindir.ext` (where `.ext` extension is platform dependent) in the same
- folder. It will also produce two demo binaries in the `out` folder named
- `demo_socp_direct`, and `demo_socp_indirect`. If you have a GPU and have CUDA
- installed, you can also execture `make gpu` to compile SCS to run on the GPU
- which will create additional libraries and demo binaries in the `out` folder
- corresponding to the gpu version. Note that the GPU version requires 32 bit
- ints, which can be enforced by compiling with `DLONG=0`.

- To use the libraries in your own source code, compile your code with the linker
- option `-L(PATH_TO_SCS_LIBS)` and `-lscsdir` or `-lscsindir` (as needed). The
- API and required data structures are defined in the file `include/scs.h`. The
- four main API functions are:
-
- * `ScsWork * scs_init(const ScsData * d, const ScsCone * k, ScsInfo * info);`
-
- This initializes the ScsWork struct containing the workspace that scs will
- use, and performs the necessary preprocessing (e.g. matrix factorization).
- All inputs `d`, `k`, and `info` must be memory allocated before calling.
-
- * `scs_int scs_solve(ScsWork * w, const ScsData * d, const ScsCone * k, ScsSolution * sol, ScsInfo * info);`
-
- This solves the problem as defined by ScsData `d` and ScsCone `k` using the
- workspace in `w`. The solution is returned in `sol` and information about
- the solve is returned in `info` (outputs must have memory allocated before
- calling). None of the inputs can be NULL. You can call `scs_solve` many
- times for one call to `scs_init`, so long as the matrix `A` does not change
- (vectors `b` and `c` can change).
-
- * `void scs_finish(ScsWork * w);`
-
- Called after all solves completed to free allocated memory and other
- cleanup.
-
- * `scs_int scs(const ScsData * d, const ScsCone * k, ScsSolution * sol, ScsInfo * info);`
-
- Convenience method that simply calls all the above routines in order, for
- cases where the workspace does not need to be reused. All inputs must have
- memory allocated before this call.
-
- The data matrix `A` is specified in column-compressed format and the vectors `b`
- and `c` are specified as dense arrays. The solutions `x` (primal), `s` (slack),
- and `y` (dual) are returned as dense arrays. Cones are specified as the struct
- defined in `include/scs.h`, the rows of `A` must correspond to the cones in the
- exact order as specified by the cone struct (i.e. put linear cones before
- second-order cones etc.).
-
- **Warm-start**
-
- You can warm-start SCS (supply a guess of the solution) by setting `warm_start`
- in the ScsData struct to `1` and supplying the warm-starts in the ScsSolution
- struct (`x`,`y`, and `s`). All inputs must be warm-started if any one is. These
- are used to initialize the iterates in `scs_solve`.
-
- **Re-using matrix factorization**
-
- If using the direct version you can factorize the matrix once and solve many
- times. Simply call `scs_init` once, and use `scs_solve` many times with the same
- workspace, changing the input data `b` and `c` (and optionally warm-starts) for
- each iteration.
-
- **Using your own linear system solver**
-
- To use your own linear system solver simply implement all the methods and the
- two structs in `include/linsys.h` and plug it in.
-
- **BLAS / LAPACK install error**
+ SCS (`splitting conic solver`) is a numerical optimization package for solving
+ large-scale convex cone problems. The current version is `3.0.0`.

- If you get an error like `cannot find -lblas` or `cannot find -llapack`, then
- you need to install blas and lapack and / or update your environment variables
- to point to the install locations.
+ The full documentation is available [here](https://www.cvxgrp.org/scs/).
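The removed README above spells out the 2.x C calling sequence (`scs_init` → `scs_solve` → `scs_finish`, or the all-in-one `scs`). A minimal sketch of that sequence, using only the signatures quoted in the diff; how the `ScsData`/`ScsCone` members are populated is omitted, and the 3.x API vendored by this release may differ, so treat it as illustrative:

```c
/* Sketch of the 2.x call sequence described in the removed README above.
 * Filling in d and k is omitted; check include/scs.h of the vendored
 * version before relying on any of this. */
#include "scs.h"

scs_int solve_with_reuse(ScsData *d, const ScsCone *k,
                         ScsSolution *sol, ScsInfo *info) {
  scs_int status;
  /* preprocess once (e.g. factorize the matrix for the direct solver) */
  ScsWork *w = scs_init(d, k, info);
  if (!w) {
    return -1; /* initialization failed */
  }
  /* solve; per the README this may be repeated with updated b and c data
   * (A fixed), optionally warm-started via sol->x, sol->y, sol->s */
  status = scs_solve(w, d, k, sol, info);
  scs_finish(w); /* free the workspace */
  return status;
}
```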
data/vendor/scs/include/aa.h CHANGED
@@ -5,50 +5,94 @@
  extern "C" {
  #endif

+ #include "glbopts.h"
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
- #include "glbopts.h"

  typedef scs_float aa_float;
  typedef scs_int aa_int;

  typedef struct ACCEL_WORK AaWork;

- /* Initialize Anderson Acceleration, allocates memory.
+ /**
+ * Initialize Anderson Acceleration, allocates memory.
+ *
+ * @param dim the dimension of the variable for AA
+ * @param mem the memory (number of past iterations used) for AA
+ * @param type1 if True use type 1 AA, otherwise use type 2
+ * @param regularization type-I and type-II different, for type-I: 1e-8 works
+ * well, type-II: more stable can use 1e-12 often
+ * @param relaxation float \in [0,2], mixing parameter (1.0 is vanilla)
+ * @param safeguard_factor factor that controls safeguarding checks
+ * larger is more aggressive but less stable
+ * @param max_weight_norm float, maximum norm of AA weights
+ * @param verbosity if greater than 0 prints out various info
+ *
+ * @return pointer to AA workspace
+ *
+ */
+ AaWork *aa_init(aa_int dim, aa_int mem, aa_int type1, aa_float regularization,
+ aa_float relaxation, aa_float safeguard_factor,
+ aa_float max_weight_norm, aa_int verbosity);
+ /**
+ * Apply Anderson Acceleration. The usage pattern should be as follows:
+ *
+ * - for i = 0 .. N:
+ * - if (i > 0): aa_apply(x, x_prev, a)
+ * - x_prev = x.copy()
+ * - x = F(x)
+ * - aa_safeguard(x, x_prev, a) // optional but helps stability
+ *
+ * Here F is the map we are trying to find the fixed point for. We put the AA
+ * before the map so that any properties of the map are maintained at the end.
+ * Eg if the map contains a projection onto a set then the output is guaranteed
+ * to be in the set.
+ *
  *
- * Args:
- * dim: the dimension of the variable for aa
- * aa_mem: the memory (number of past iterations used) for aa
- * type1: bool, if True use type 1 aa, otherwise use type 2
+ * @param f output of map at current iteration, overwritten with AA output
+ * @param x input to map at current iteration
+ * @param a workspace from aa_init
+ *
+ * @return (+ or -) norm of AA weights vector. If positive then update
+ * was accepted and f contains new point, if negative then update was
+ * rejected and f is unchanged
  *
- * Reurns:
- * Pointer to aa workspace
  */
- AaWork *aa_init(aa_int dim, aa_int aa_mem, aa_int type1);
+ aa_float aa_apply(aa_float *f, const aa_float *x, AaWork *a);

- /* Apply Anderson Acceleration.
+ /**
+ * Apply safeguarding.
+ *
+ * This step is optional but can improve stability.
+ *
+ * @param f_new output of map after AA step
+ * @param x_new AA output that is input to the map
+ * @param a workspace from aa_init
  *
- * Args:
- * f: output of map at current iteration, overwritten with aa output at end.
- * x: input to map at current iteration
- * a: aa workspace from aa_init
+ * @returns 0 if AA step is accepted otherwise -1, if AA step is rejected then
+ * this overwrites f_new and x_new with previous values
  *
- * Returns:
- * int, a value of 0 is success, <0 is failure at which point f is unchanged
  */
- aa_int aa_apply(aa_float *f, const aa_float *x, AaWork *a);
+ aa_int aa_safeguard(aa_float *f_new, aa_float *x_new, AaWork *a);

- /* Finish Anderson Acceleration, clears memory.
+ /**
+ * Finish Anderson Acceleration, clears memory.
+ *
+ * @param a AA workspace from aa_init
  *
- * Args:
- * a: aa workspace from aa_init.
  */
  void aa_finish(AaWork *a);

- #define MAX_AA_NRM (1e4)
-
- #define MIN(a, b) (((a) < (b)) ? (a) : (b))
+ /**
+ * Reset Anderson Acceleration.
+ *
+ * Resets AA as if at the first iteration, reuses original memory allocations.
+ *
+ * @param a AA workspace from aa_init
+ *
+ */
+ void aa_reset(AaWork *a);

  #ifdef __cplusplus
  }
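The new header comments above document the intended calling pattern for the expanded AA interface. A minimal sketch of that loop in C, built only from the signatures added in this diff; `fixed_point_map` and the numeric arguments to `aa_init` (memory 10, regularization 1e-8, relaxation 1.0, safeguard factor 1.0, weight-norm cap 1e10, verbosity 0) are illustrative placeholders, not values taken from SCS:

```c
/* Sketch of the usage pattern described in the aa.h comments above.
 * fixed_point_map() and all numeric parameters are placeholders. */
#include <string.h>
#include "aa.h"

extern void fixed_point_map(aa_float *x, aa_int dim); /* hypothetical map F */

void run_accelerated(aa_float *x, aa_float *x_prev, aa_int dim, aa_int iters) {
  aa_int i;
  /* type-1 AA with illustrative settings */
  AaWork *a = aa_init(dim, 10, 1, 1e-8, 1.0, 1.0, 1e10, 0);
  for (i = 0; i < iters; ++i) {
    if (i > 0) {
      aa_apply(x, x_prev, a); /* accelerate before applying the map */
    }
    memcpy(x_prev, x, dim * sizeof(aa_float)); /* x_prev = x.copy() */
    fixed_point_map(x, dim);                   /* x = F(x) */
    aa_safeguard(x, x_prev, a); /* optional; rolls back a bad AA step */
  }
  aa_finish(a);
}
```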
data/vendor/scs/include/cones.h CHANGED
@@ -11,34 +11,34 @@ extern "C" {

  /* private data to help cone projection step */
  struct SCS_CONE_WORK {
- scs_float total_cone_time;
+ /*
+ * cone_boundaries will contain array of indices of rows of A corresponding to
+ * cone boundaries, boundaries[0] is starting index for cones of size larger
+ * than 1
+ */
+ scs_float *s; /* used for Moreau decomposition in projection */
+ scs_int cone_len;
+ /* box cone quantities */
+ scs_float *bl, *bu, box_t_warm_start;
  #ifdef USE_LAPACK
  /* workspace for eigenvector decompositions: */
  scs_float *Xs, *Z, *e, *work;
- blas_int *iwork, lwork, liwork;
+ blas_int lwork;
  #endif
  };

- /*
- * boundaries will contain array of indices of rows of A corresponding to
- * cone boundaries, boundaries[0] is starting index for cones of size larger
- * than 1
- * returns length of boundaries array, boundaries malloc-ed here so should be
- * freed
- */
- scs_int SCS(get_cone_boundaries)(const ScsCone *k, scs_int **boundaries);
-
- ScsConeWork *SCS(init_cone)(const ScsCone *k);
+ ScsConeWork *SCS(init_cone)(const ScsCone *k, const ScsScaling *scal,
+ scs_int cone_len);
  char *SCS(get_cone_header)(const ScsCone *k);
  scs_int SCS(validate_cones)(const ScsData *d, const ScsCone *k);
+ scs_int SCS(set_cone_boundaries)(const ScsCone *k, scs_int **cone_boundaries);

- /* pass in iter to control how accurate the cone projection
- with iteration, set iter < 0 for exact projection, warm_start contains guess
- of solution, can be SCS_NULL*/
  scs_int SCS(proj_dual_cone)(scs_float *x, const ScsCone *k, ScsConeWork *c,
- const scs_float *warm_start, scs_int iter);
+ scs_int normalize);
  void SCS(finish_cone)(ScsConeWork *c);
- char *SCS(get_cone_summary)(const ScsInfo *info, ScsConeWork *c);
+
+ void SCS(set_rho_y_vec)(const ScsCone *k, scs_float scale, scs_float *rho_y_vec,
+ scs_int m);

  #ifdef __cplusplus
  }
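For context on the new `s` work vector's comment above ("used for Moreau decomposition in projection"): the Moreau decomposition is the standard convex-analysis identity that lets the projection onto the dual cone `K^*` (as in `proj_dual_cone`) be computed from the projection onto `K` itself. This is textbook background rather than anything stated in the diff:

```
x = \Pi_K(x) + \Pi_{-K^*}(x), \quad \Pi_K(x) \perp \Pi_{-K^*}(x)
\;\Longrightarrow\; \Pi_{K^*}(x) = x + \Pi_K(-x)
```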