robotic 0.3.4.dev1-cp312-cp312-manylinux2014_x86_64.whl → 0.3.4.dev3-cp312-cp312-manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of robotic has been flagged as possibly problematic by the registry.
- robotic/_robotic.pyi +43 -15
- robotic/_robotic.so +0 -0
- robotic/include/rai/Algo/RidgeRegression.h +1 -1
- robotic/include/rai/Algo/rungeKutta.h +1 -1
- robotic/include/rai/Core/array.h +31 -22
- robotic/include/rai/Core/array.ipp +59 -74
- robotic/include/rai/Core/arrayDouble.h +29 -25
- robotic/include/rai/Core/defines.h +0 -1
- robotic/include/rai/Core/thread.h +1 -1
- robotic/include/rai/Core/util.h +0 -1
- robotic/include/rai/DataGen/shapenetGrasps.h +1 -1
- robotic/include/rai/Geo/geo.h +7 -7
- robotic/include/rai/Geo/mesh.h +2 -2
- robotic/include/rai/Geo/pairCollision.h +42 -42
- robotic/include/rai/Geo/signedDistanceFunctions.h +5 -3
- robotic/include/rai/KOMO/komo.h +1 -1
- robotic/include/rai/Kin/cameraview.h +27 -16
- robotic/include/rai/Kin/dof_forceExchange.h +3 -3
- robotic/include/rai/Kin/feature.h +1 -1
- robotic/include/rai/Kin/frame.h +1 -1
- robotic/include/rai/Kin/proxy.h +1 -1
- robotic/include/rai/Kin/simulation.h +5 -3
- robotic/include/rai/Logic/treeSearchDomain.h +2 -2
- robotic/include/rai/Optim/BayesOpt.h +14 -7
- robotic/include/rai/Optim/CMA/boundary_transformation.h +73 -0
- robotic/include/rai/Optim/CMA/cmaes.h +175 -0
- robotic/include/rai/Optim/CMA/cmaes_interface.h +68 -0
- robotic/include/rai/Optim/GlobalIterativeNewton.h +7 -3
- robotic/include/rai/Optim/NLP.h +15 -1
- robotic/include/rai/Optim/NLP_Solver.h +5 -5
- robotic/include/rai/Optim/constrained.h +3 -3
- robotic/include/rai/Optim/lagrangian.h +6 -5
- robotic/include/rai/Optim/m_EvoStrategies.h +113 -0
- robotic/include/rai/Optim/{gradient.h → m_Gradient.h} +12 -13
- robotic/include/rai/Optim/m_LBFGS.h +21 -0
- robotic/include/rai/Optim/m_LeastSquaresZeroOrder.h +34 -13
- robotic/include/rai/Optim/m_LocalGreedy.h +31 -0
- robotic/include/rai/Optim/m_NelderMead.h +23 -0
- robotic/include/rai/Optim/{newton.h → m_Newton.h} +8 -5
- robotic/include/rai/Optim/options.h +7 -7
- robotic/include/rai/Optim/primalDual.h +9 -5
- robotic/include/rai/Optim/testProblems_Opt.h +5 -5
- robotic/include/rai/Optim/utils.h +10 -20
- robotic/include/rai/Search/TreeSearchNode.h +1 -1
- robotic/librai.so +0 -0
- robotic/meshTool +0 -0
- robotic/version.py +1 -1
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev3.dist-info}/METADATA +1 -1
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev3.dist-info}/RECORD +70 -64
- robotic/include/rai/Optim/lbfgs.h +0 -18
- /robotic/include/rai/Geo/{assimpInterface.h → i_assimp.h} +0 -0
- /robotic/include/rai/Geo/{fclInterface.h → i_fcl.h} +0 -0
- /robotic/include/rai/Kin/{kin_bullet.h → i_Bullet.h} +0 -0
- /robotic/include/rai/Kin/{kin_feather.h → i_Feather.h} +0 -0
- /robotic/include/rai/Kin/{kin_ode.h → i_Ode.h} +0 -0
- /robotic/include/rai/Kin/{kin_physx.h → i_Physx.h} +0 -0
- /robotic/include/rai/Optim/{opt-ceres.h → i_Ceres.h} +0 -0
- /robotic/include/rai/Optim/{opt-ipopt.h → i_Ipopt.h} +0 -0
- /robotic/include/rai/Optim/{opt-nlopt.h → i_NLopt.h} +0 -0
- /robotic/include/rai/Optim/{liblbfgs.h → liblbfgs/liblbfgs.h} +0 -0
- /robotic/include/rai/Optim/{SlackGaussNewton.h → m_SlackGaussNewton.h} +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev3.data}/scripts/ry-bot +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev3.data}/scripts/ry-h5info +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev3.data}/scripts/ry-info +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev3.data}/scripts/ry-meshTool +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev3.data}/scripts/ry-test +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev3.data}/scripts/ry-urdfConvert.py +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev3.data}/scripts/ry-view +0 -0
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev3.dist-info}/WHEEL +0 -0
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev3.dist-info}/licenses/LICENSE +0 -0
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev3.dist-info}/top_level.txt +0 -0

robotic/include/rai/Optim/CMA/cmaes.h
ADDED
@@ -0,0 +1,175 @@
+/* --------------------------------------------------------- */
+/* --- File: cmaes.h ----------- Author: Nikolaus Hansen --- */
+/* ---------------------- last modified: IX 2010 --- */
+/* --------------------------------- by: Nikolaus Hansen --- */
+/* --------------------------------------------------------- */
+/*
+  CMA-ES for non-linear function minimization.
+
+  Copyright (C) 1996, 2003-2010 Nikolaus Hansen.
+  e-mail: nikolaus.hansen (you know what) inria.fr
+
+  License: see file cmaes.c
+
+*/
+#ifndef NH_cmaes_h /* only include ones */
+#define NH_cmaes_h
+
+#include <time.h>
+
+typedef struct
+/* cmaes_random_t
+ * sets up a pseudo random number generator instance
+ */
+{
+  /* Variables for Uniform() */
+  long int startseed;
+  long int aktseed;
+  long int aktrand;
+  long int *rgrand;
+
+  /* Variables for Gauss() */
+  short flgstored;
+  double hold;
+} cmaes_random_t;
+
+typedef struct
+/* cmaes_timings_t
+ * time measurement, used to time eigendecomposition
+ */
+{
+  /* for outside use */
+  double totaltime; /* zeroed by calling re-calling cmaes_timings_start */
+  double totaltotaltime;
+  double tictoctime;
+  double lasttictoctime;
+
+  /* local fields */
+  clock_t lastclock;
+  time_t lasttime;
+  clock_t ticclock;
+  time_t tictime;
+  short istic;
+  short isstarted;
+
+  double lastdiff;
+  double tictoczwischensumme;
+} cmaes_timings_t;
+
+typedef struct
+/* cmaes_readpara_t
+ * collects all parameters, in particular those that are read from
+ * a file before to start. This should split in future?
+ */
+{
+  char * filename; /* keep record of the file that was taken to read parameters */
+  short flgsupplemented;
+
+  /* input parameters */
+  int N; /* problem dimension, must stay constant, should be unsigned or long? */
+  unsigned int seed;
+  double * xstart;
+  double * typicalX;
+  int typicalXcase;
+  double * rgInitialStds;
+  double * rgDiffMinChange;
+
+  /* termination parameters */
+  double stopMaxFunEvals;
+  double facmaxeval;
+  double stopMaxIter;
+  struct { int flg; double val; } stStopFitness;
+  double stopTolFun;
+  double stopTolFunHist;
+  double stopTolX;
+  double stopTolUpXFactor;
+
+  /* internal evolution strategy parameters */
+  int lambda;          /* -> mu, <- N */
+  int mu;              /* -> weights, (lambda) */
+  double mucov, mueff; /* <- weights */
+  double *weights;     /* <- mu, -> mueff, mucov, ccov */
+  double damps;        /* <- cs, maxeval, lambda */
+  double cs;           /* -> damps, <- N */
+  double ccumcov;      /* <- N */
+  double ccov;         /* <- mucov, <- N */
+  double diagonalCov;  /* number of initial iterations */
+  struct { int flgalways; double modulo; double maxtime; } updateCmode;
+  double facupdateCmode;
+
+  /* supplementary variables */
+
+  char *weigkey;
+  char resumefile[99];
+  const char **rgsformat;
+  void **rgpadr;
+  const char **rgskeyar;
+  double ***rgp2adr;
+  int n1para, n1outpara;
+  int n2para;
+} cmaes_readpara_t;
+
+typedef struct
+/* cmaes_t
+ * CMA-ES "object"
+ */
+{
+  const char *version;
+  /* char *signalsFilename; */
+  cmaes_readpara_t sp;
+  cmaes_random_t rand; /* random number generator */
+
+  double sigma;  /* step size */
+
+  double *rgxmean;  /* mean x vector, "parent" */
+  double *rgxbestever;
+  double **rgrgx;   /* range of x-vectors, lambda offspring */
+  int *index;       /* sorting index of sample pop. */
+  double *arFuncValueHist;
+
+  short flgIniphase; /* not really in use anymore */
+  short flgStop;
+
+  double chiN;
+  double **C;  /* lower triangular matrix: i>=j for C[i][j] */
+  double **B;  /* matrix with normalize eigenvectors in columns */
+  double *rgD; /* axis lengths */
+
+  double *rgpc;
+  double *rgps;
+  double *rgxold;
+  double *rgout;
+  double *rgBDz;   /* for B*D*z */
+  double *rgdTmp;  /* temporary (random) vector used in different places */
+  double *rgFuncValue;
+  double *publicFitness; /* returned by cmaes_init() */
+
+  double gen; /* Generation number */
+  double countevals;
+  double state; /* 1 == sampled, 2 == not in use anymore, 3 == updated */
+
+  double maxdiagC; /* repeatedly used for output */
+  double mindiagC;
+  double maxEW;
+  double minEW;
+
+  char sOutString[330]; /* 4x80 */
+
+  short flgEigensysIsUptodate;
+  short flgCheckEigen; /* control via cmaes_signals.par */
+  double genOfEigensysUpdate;
+  cmaes_timings_t eigenTimings;
+
+  double dMaxSignifKond;
+  double dLastMinEWgroesserNull;
+
+  short flgresumedone;
+
+  time_t printtime;
+  time_t writetime; /* ideally should keep track for each output file */
+  time_t firstwritetime;
+  time_t firstprinttime;
+
+} cmaes_t;
+
+#endif

robotic/include/rai/Optim/CMA/cmaes_interface.h
ADDED
@@ -0,0 +1,68 @@
+/* --------------------------------------------------------- */
+/* --- File: cmaes_interface.h - Author: Nikolaus Hansen --- */
+/* ---------------------- last modified: IV 2007 --- */
+/* --------------------------------- by: Nikolaus Hansen --- */
+/* --------------------------------------------------------- */
+/*
+  CMA-ES for non-linear function minimization.
+
+  Copyright (C) 1996, 2003, 2007 Nikolaus Hansen.
+  e-mail: hansen AT lri.fr
+
+  Documentation: see file docfunctions.txt
+
+  License: see file cmaes.c
+*/
+#include "cmaes.h"
+
+/* --------------------------------------------------------- */
+/* ------------------ Interface ---------------------------- */
+/* --------------------------------------------------------- */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* --- initialization, constructors, destructors --- */
+double * cmaes_init(cmaes_t *, int dimension , double *xstart,
+                    double *stddev, long seed, int lambda,
+                    const char *input_parameter_filename);
+void cmaes_init_para(cmaes_t *, int dimension , double *xstart,
+                     double *stddev, long seed, int lambda,
+                     const char *input_parameter_filename);
+double * cmaes_init_final(cmaes_t *);
+void cmaes_resume_distribution(cmaes_t *evo_ptr, char *filename);
+void cmaes_exit(cmaes_t *);
+
+/* --- core functions --- */
+double * const * cmaes_SamplePopulation(cmaes_t *);
+double * cmaes_UpdateDistribution(cmaes_t *,
+                                  const double *rgFitnessValues);
+const char * cmaes_TestForTermination(cmaes_t *);
+
+/* --- additional functions --- */
+double * const * cmaes_ReSampleSingle( cmaes_t *t, int index);
+double const * cmaes_ReSampleSingle_old(cmaes_t *, double *rgx);
+double * cmaes_SampleSingleInto( cmaes_t *t, double *rgx);
+void cmaes_UpdateEigensystem(cmaes_t *, int flgforce);
+
+/* --- getter functions --- */
+double cmaes_Get(cmaes_t *, char const *keyword);
+const double * cmaes_GetPtr(cmaes_t *, char const *keyword); /* e.g. "xbestever" */
+double * cmaes_GetNew( cmaes_t *t, char const *keyword); /* user is responsible to free */
+double * cmaes_GetInto( cmaes_t *t, char const *keyword, double *mem); /* allocs if mem==NULL, user is responsible to free */
+
+/* --- online control and output --- */
+void cmaes_ReadSignals(cmaes_t *, char const *filename);
+void cmaes_WriteToFile(cmaes_t *, const char *szKeyWord,
+                       const char *output_filename);
+char * cmaes_SayHello(cmaes_t *);
+/* --- misc --- */
+double * cmaes_NewDouble(int n); /* user is responsible to free */
+void cmaes_FATAL(char const *s1, char const *s2, char const *s3,
+                 char const *s4);
+
+#ifdef __cplusplus
+} // end extern "C"
+#endif
+
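
For orientation, the C API declared above is driven as an ask-evaluate-tell loop. A minimal sketch, not taken from the package, assuming the matching cmaes.c implementation is linked and that "non" as the parameter-file name skips file reading (the library's documented convention); the sphere objective is a placeholder:

#include "cmaes_interface.h"
#include <stdlib.h>

static double sphere(double const* x, int n) {    /* placeholder objective */
  double s = 0.;
  for(int i = 0; i < n; i++) s += x[i]*x[i];
  return s;
}

void run_cmaes(int dim) {
  cmaes_t evo;
  double* xstart = cmaes_NewDouble(dim);
  double* stddev = cmaes_NewDouble(dim);
  for(int i = 0; i < dim; i++) { xstart[i] = 0.5; stddev[i] = 0.3; }
  /* lambda=0 lets the library pick its default population size (assumption) */
  double* fitvals = cmaes_init(&evo, dim, xstart, stddev, /*seed*/0, /*lambda*/0, "non");
  int lambda = (int)cmaes_Get(&evo, "lambda");
  while(!cmaes_TestForTermination(&evo)) {
    double* const* pop = cmaes_SamplePopulation(&evo);                 /* ask */
    for(int i = 0; i < lambda; i++) fitvals[i] = sphere(pop[i], dim);  /* evaluate */
    cmaes_UpdateDistribution(&evo, fitvals);                           /* tell */
  }
  double const* xbest = cmaes_GetPtr(&evo, "xbestever");               /* best solution found */
  (void)xbest;
  cmaes_exit(&evo);
  free(xstart); free(stddev);
}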

robotic/include/rai/Optim/GlobalIterativeNewton.h
CHANGED
@@ -8,8 +8,10 @@
 
 #pragma once
 
-#include "
-#include "
+#include "m_Newton.h"
+#include "m_Gradient.h"
+
+namespace rai {
 
 struct GlobalIterativeNewton {
   arr x;
@@ -21,7 +23,7 @@ struct GlobalIterativeNewton {
   rai::Array<LocalMinimum> localMinima;
   LocalMinimum* best;
 
-  GlobalIterativeNewton(ScalarFunction
+  GlobalIterativeNewton(ScalarFunction f, const arr& bounds, std::shared_ptr<OptOptions> opt=make_shared<OptOptions>());
   ~GlobalIterativeNewton();
 
   void step();
@@ -30,3 +32,5 @@ struct GlobalIterativeNewton {
 
   void reOptimizeAllPoints();
 };
+
+} //namespace
robotic/include/rai/Optim/NLP.h
CHANGED
@@ -62,7 +62,8 @@ struct NLP : rai::NonCopyable {
 
   //-- utilities
   shared_ptr<NLP> ptr() { return shared_ptr<NLP>(this, [](NLP*) {}); }
-  double eval_scalar(arr& g, arr& H, const arr& x);
+  virtual double eval_scalar(arr& g, arr& H, const arr& x);
+  ScalarFunction f_scalar(){ return [this](arr& g, arr& H, const arr& x){ return this->eval_scalar(g, H, x); }; }
   bool checkJacobian(const arr& x, double tolerance, const StringA& featureNames= {});
   bool checkHessian(const arr& x, double tolerance);
   bool checkBounds(bool strictlyLarger);
@@ -80,6 +81,19 @@ struct NLP : rai::NonCopyable {
 
 //===========================================================================
 
+struct NLP_Scalar : NLP {
+  arr x, H_x;
+  NLP_Scalar() { featureTypes.resize(1) = OT_f; }
+  virtual double f(arr& g, arr& H, const arr& x) = 0;
+  void evaluate(arr& phi, arr& J, const arr& _x){
+    x = _x;
+    double f_x = f(J, H_x, x);
+    phi.resize(1) = f_x;
+    if(!!J) J.reshape(1, x.N);
+  }
+  void getFHessian(arr& H, const arr& _x) { CHECK_EQ(_x, x, ""); H = H_x; }
+};
+
 struct NLP_Factored : NLP {
   //-- problem factorization: needs to be defined in the constructor or a derived class
   uintA variableDimensions; //the size of each variable block
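
The new NLP_Scalar base turns a single scalar objective into an NLP with one OT_f feature; a derived class only needs to supply f(). A hedged sketch of such a subclass, not from the diff, assuming rai's arr helpers eye() and sumOfSqr() and the dimension member inherited from NLP (none of which appear in this hunk):

struct SphereNLP : NLP_Scalar {               // hypothetical user-defined problem
  SphereNLP(uint n) { dimension = n; }        // 'dimension' inherited from NLP (assumption)
  double f(arr& g, arr& H, const arr& x) override {
    if(!!g) g = 2.*x;                         // gradient of sum_i x_i^2
    if(!!H) H = 2.*eye(x.N);                  // constant Hessian
    return sumOfSqr(x);
  }
};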

robotic/include/rai/Optim/NLP_Solver.h
CHANGED
@@ -19,17 +19,17 @@ struct ConstrainedSolver;
 /** User Interface: Meta class to call several different solvers in a unified manner. */
 struct NLP_Solver : NonCopyable {
   arr x, dual; //owner of decision variables, which are passed by reference to lower level solvers
-
+  std::shared_ptr<OptOptions> opt; //owner of options, which are passed by reference to lower level solvers
   std::shared_ptr<SolverReturn> ret;
   std::shared_ptr<ConstrainedSolver> optCon;
   std::shared_ptr<NLP_Traced> P;
 
   NLP_Solver();
-  NLP_Solver(const shared_ptr<NLP>& _P, int verbose=-100) { setProblem(_P); if(verbose>-100) opt
+  NLP_Solver(const shared_ptr<NLP>& _P, int verbose=-100) : NLP_Solver() { setProblem(_P); if(verbose>-100) opt->verbose=verbose; }
 
-  NLP_Solver& setSolver(OptMethod _method) { opt
+  NLP_Solver& setSolver(OptMethod _method) { opt->method=_method; return *this; }
   NLP_Solver& setProblem(const shared_ptr<NLP>& _P);
-  NLP_Solver& setOptions(const rai::OptOptions& _opt) { opt = _opt; return *this; }
+  NLP_Solver& setOptions(const rai::OptOptions& _opt) { *opt = _opt; return *this; }
   NLP_Solver& setInitialization(const arr& _x) { x=_x; return *this; }
   NLP_Solver& setWarmstart(const arr& _x, const arr& _dual) { x=_x; dual=_dual; return *this; }
   NLP_Solver& setTracing(bool trace_x, bool trace_costs, bool trace_phi, bool trace_J) { P->setTracing(trace_x, trace_costs, trace_phi, trace_J); return *this; }
@@ -48,7 +48,7 @@ struct NLP_Solver : NonCopyable {
   arr getTrace_evals();
   rai::Graph reportLagrangeGradients(const StringA& featureNames);
   void gnuplot_costs() {
-    FILE("z.opt.trace") <<getTrace_costs();
+    FILE("z.opt.trace") <<getTrace_costs().modRaw();
     gnuplot("plot 'z.opt.trace' us 0:1 t 'f', '' us 0:2 t 'sos', '' us 0:3 t 'ineq', '' us 0:4 t 'eq'");
   }
 };

robotic/include/rai/Optim/constrained.h
CHANGED
@@ -9,7 +9,7 @@
 #pragma once
 
 #include "lagrangian.h"
-#include "
+#include "m_Newton.h"
 
 namespace rai {
 
@@ -24,10 +24,10 @@ struct ConstrainedSolver {
   LagrangianProblem L;
   OptNewton newton;
   arr& dual;
-
+  shared_ptr<OptOptions> opt;
   int outer_iters=0, numBadSteps=0;
 
-  ConstrainedSolver(arr& x, arr& dual, const shared_ptr<NLP>& P,
+  ConstrainedSolver(arr& x, arr& dual, const shared_ptr<NLP>& P, shared_ptr<OptOptions> _opt=make_shared<OptOptions>());
 
   std::shared_ptr<SolverReturn> run();
   bool ministep();

robotic/include/rai/Optim/lagrangian.h
CHANGED
@@ -21,8 +21,9 @@ namespace rai {
 // that can include lagrange terms, penalties, log barriers, and augmented lagrangian terms
 //
 
-struct LagrangianProblem :
+struct LagrangianProblem : NLP {
   shared_ptr<NLP> P;
+  shared_ptr<OptOptions> opt;
 
   //-- parameters of the inner problem (Lagrangian, unconstrained problem)
   double muLB; ///< log barrier mu
@@ -34,20 +35,20 @@ struct LagrangianProblem : ScalarFunction, NLP {
   arr x; ///< point where P was last evaluated
   arr phi_x, J_x, H_x; ///< features at x
 
-  LagrangianProblem(const shared_ptr<NLP>& P,
+  LagrangianProblem(const shared_ptr<NLP>& P, std::shared_ptr<OptOptions> _opt, double muSquaredPenalty=-1., double muLogBarrier=-1.);
 
   virtual void evaluate(arr& phi, arr& J, const arr& x); //evaluate all features and (optionally) their Jacobians for state x
   virtual void getFHessian(arr& H, const arr& x); //the Hessian of the sum of all f-features (or Hessian in addition to the Gauss-Newton Hessian of all other features)
   virtual arr getInitializationSample() { return P->getInitializationSample(); }
   virtual void report(ostream &os, int verbose, const char *msg){ P->report(os, verbose, msg); }
 
-  double
+  virtual double eval_scalar(arr& dL, arr& HL, const arr& x); ///< CORE METHOD: the unconstrained scalar function F
 
   rai::Graph reportGradients(const StringA& featureNames);
   void reportMatrix(std::ostream& os);
 
-  void aulaUpdate(
-  void autoUpdate(
+  void aulaUpdate(bool anyTimeVariant, double lambdaStepsize=1., double* L_x=nullptr, arr& dL_x=NoArr, arr& HL_x=NoArr);
+  void autoUpdate(double* L_x=nullptr, arr& dL_x=NoArr, arr& HL_x=NoArr);
 
   //private: used gpenalty function
   double gpenalty(double g);

robotic/include/rai/Optim/m_EvoStrategies.h
ADDED
@@ -0,0 +1,113 @@
+#pragma once
+
+#include "NLP.h"
+#include "options.h"
+#include "../Core/util.h"
+
+/*
+
+Implement:
+HillClimbing (with fixed exploration; with adaptive exploration)
+DifferentialHillClimbing (with adaptive exploration distribution on delta)
+classical model-based optim
+
+Greedy local search (6:5) Stochastic local search (6:6) Simulated annealing (6:7)
+Random restarts (6:10) Iterated local search (6:11) Variable neighborhood search
+(6:13) Coordinate search (6:14) Pattern search (6:15) Nelder-Mead simplex method
+(6:16) General stochastic search (6:20) Evolutionary algorithms (6:23) Covariance Matrix
+Adaptation (CMAES) (6:24) Estimation of Distribution Algorithms (EDAs) (6:28)
+Model-based optimization (6:31) Implicit filtering (6:34)
+
+Improvement (5:24) Maximal Probability of Improvement
+(5:24) GP-UCB (5:24)
+
+Generic globalization: Iterated Local Optim: check when converged multiply to same local opt
+
+Require bound constraints!
+
+*
+Twiddle
+*/
+
+namespace rai {
+
+//===========================================================================
+
+struct EvolutionStrategy {
+  ScalarFunction f;
+  shared_ptr<OptOptions> opt;
+  arr x;
+  double f_x=1e10;
+  int evals=0, steps=0, rejectedSteps=0, tinySteps=0;
+
+  EvolutionStrategy(ScalarFunction _f, const arr& x_init, shared_ptr<OptOptions> _opt): f(_f), opt(_opt), x(x_init) {}
+
+  //virtuals that define a method
+  virtual arr generateNewSamples() = 0;
+  virtual void update(arr& samples, const arr& values) = 0;
+
+  //generic stepping & looping
+  bool step();
+  shared_ptr<SolverReturn> solve();
+
+  //helper
+  arr select(const arr& samples, const arr& values, uint mu);
+
+};
+
+//===========================================================================
+
+struct CMAES : EvolutionStrategy {
+  unique_ptr<struct CMA_self> self;
+  RAI_PARAM("CMA/", int, lambda, 20)
+  RAI_PARAM("CMA/", double, sigmaInit, .1)
+
+  CMAES(ScalarFunction f, const arr& x_init, shared_ptr<OptOptions> opt = make_shared<OptOptions>());
+  ~CMAES();
+
+  virtual arr generateNewSamples();
+  virtual void update(arr& samples, const arr& values);
+
+  arr getBestEver();
+  arr getCurrentMean();
+};
+
+//===========================================================================
+
+struct ES_mu_plus_lambda : EvolutionStrategy {
+  arr mean;
+  arr elite;
+  RAI_PARAM("ES/", double, sigma, .1)
+  RAI_PARAM("ES/", double, sigmaDecay, .001)
+  RAI_PARAM("ES/", uint, lambda, 20)
+  RAI_PARAM("ES/", uint, mu, 5)
+
+  ES_mu_plus_lambda(ScalarFunction f, const arr& x_init, shared_ptr<OptOptions> opt = make_shared<OptOptions>())
+    : EvolutionStrategy(f, x_init, opt) { mean = x_init; }
+
+  virtual arr generateNewSamples();
+
+  virtual void update(arr& X, const arr& y);
+};
+
+//===========================================================================
+
+struct GaussEDA : EvolutionStrategy {
+  arr mean;
+  arr cov;
+  arr elite;
+  RAI_PARAM("GaussEDA/", double, sigmaInit, .1)
+  RAI_PARAM("GaussEDA/", double, sigma2Min, .001)
+  RAI_PARAM("GaussEDA/", double, beta, .1)
+  RAI_PARAM("ES/", double, sigmaDecay, .001)
+  RAI_PARAM("ES/", uint, lambda, 20)
+  RAI_PARAM("ES/", uint, mu, 5)
+
+  GaussEDA(ScalarFunction f, const arr& x_init, shared_ptr<OptOptions> opt = make_shared<OptOptions>());
+
+  virtual arr generateNewSamples();
+
+  virtual void update(arr& X, const arr& y);
+};
+
+} //namespace
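
The EvolutionStrategy base above exposes an ask/tell pair (generateNewSamples / update) next to the generic step()/solve() loop. A hedged usage sketch, not from the package, assuming candidates are returned one per row and that a lambda converts to the ScalarFunction type used throughout these headers; sumOfSqr is rai's sum-of-squares helper (assumption):

auto opt = std::make_shared<rai::OptOptions>();
arr x0 = {.3, .3, .3};                                   // start point
auto f = [](arr& g, arr& H, const arr& x) -> double {    // objective; gradient/Hessian unused here
  return sumOfSqr(x);
};

rai::CMAES es(f, x0, opt);
for(uint k = 0; k < 100; k++) {                          // simple external iteration budget
  arr X = es.generateNewSamples();                       // ask: lambda candidates (one per row, assumption)
  arr y(X.d0);
  for(uint i = 0; i < X.d0; i++) y(i) = f(NoArr, NoArr, X[i]);   // evaluate
  es.update(X, y);                                       // tell: adapt the search distribution
}
arr best = es.getBestEver();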

robotic/include/rai/Optim/{gradient.h → m_Gradient.h}
CHANGED
@@ -11,6 +11,8 @@
 #include "options.h"
 #include "../Core/array.h"
 
+namespace rai {
+
 //===========================================================================
 //
 // proper (monotone) plain gradient descent with line search
@@ -18,28 +20,24 @@
 
 struct OptGrad {
   arr& x;
-  ScalarFunction
-
+  ScalarFunction f;
+  shared_ptr<OptOptions> opt;
 
   enum StopCriterion { stopNone=0, stopCrit1, stopCrit2, stopCritLineSteps, stopCritEvals, stopStepFailed };
-  double
-  arr
+  double f_x;
+  arr g_x;
   double alpha;
   uint it, evals, numTinySteps;
   StopCriterion stopCriterion;
   ofstream fil;
 
-  OptGrad(arr& x, ScalarFunction
+  OptGrad(arr& x, ScalarFunction f, std::shared_ptr<OptOptions> _opt);
   ~OptGrad();
   StopCriterion step();
   StopCriterion run(uint maxIt = 1000);
   void reinit(const arr& _x=NoArr);
 };
 
-inline int optGrad(arr& x, ScalarFunction& f, rai::OptOptions opt=DEFAULT_OPTIONS) {
-  return OptGrad(x, f, opt).run();
-}
-
 //===========================================================================
 //
 // Rprop
@@ -53,11 +51,12 @@ struct Rprop {
   Rprop();
   ~Rprop();
   void init(double initialStepSize=1., double minStepSize=1e-6, double stepMaxSize=50.);
-  bool step(arr& x, ScalarFunction
-  uint loop(arr& x, ScalarFunction
+  bool step(arr& x, ScalarFunction f);
+  uint loop(arr& x, ScalarFunction f, double stoppingTolerance=1e-2, double initialStepSize=1., uint maxIterations=1000, int verbose=0);
 };
 
-inline uint optRprop(arr& x, ScalarFunction
-  return Rprop().loop(x, f, opt
+inline uint optRprop(arr& x, ScalarFunction f, shared_ptr<OptOptions> opt) {
+  return Rprop().loop(x, f, opt->stopTolerance, opt->stepInit, opt->stopEvals, opt->verbose);
 }
 
+} //namespace

robotic/include/rai/Optim/m_LBFGS.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+#include "NLP.h"
+#include "options.h"
+
+namespace rai {
+
+struct LBFGS{
+  ScalarFunction f;
+  shared_ptr<rai::OptOptions> opt;
+  arr x;
+
+  LBFGS(ScalarFunction _f, const arr& x_init, std::shared_ptr<OptOptions> _opt);
+
+  std::shared_ptr<SolverReturn> solve();
+
+  //private:
+  arr g;
+};
+
+} //namespace
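
The new LBFGS wrapper appears to follow the same construct-then-solve pattern as the other solvers in this release. A hedged sketch, not from the package: the ScalarFunction must fill in the gradient, and ret->x holding the solution mirrors the SolverReturn usage visible elsewhere in these headers (an assumption):

auto opt = std::make_shared<rai::OptOptions>();
arr x0 = {1., -1., .5};
auto f = [](arr& g, arr& H, const arr& x) -> double {
  if(!!g) g = 2.*x;            // L-BFGS needs the gradient
  return sumOfSqr(x);          // sumOfSqr: rai's sum-of-squares helper (assumption)
};
rai::LBFGS lbfgs(f, x0, opt);
auto ret = lbfgs.solve();      // shared_ptr<SolverReturn>
arr x_opt = ret->x;            // solution estimate (assumption)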

robotic/include/rai/Optim/m_LeastSquaresZeroOrder.h
CHANGED
@@ -1,31 +1,50 @@
 #pragma once
 
 #include "NLP.h"
+#include "options.h"
 #include "../Core/util.h"
 
+namespace rai {
+
 struct LeastSquaredZeroOrder{
   shared_ptr<NLP> P;
+  std::shared_ptr<OptOptions> opt;
+  bool hasLinTerms=false;
 
-  //-- parameters
-
-  double
-
-
-
+  //-- parameters
+  str method="rank1";
+  double alpha = .5;
+  RAI_PARAM("LSZO/", double, alpha_min, .001)
+  RAI_PARAM("LSZO/", double, damping, 1e-2)
+  RAI_PARAM("LSZO/", double, noiseRatio, .2)
+  RAI_PARAM("LSZO/", double, noiseAbs, .0)
+  RAI_PARAM("LSZO/", int, maxIters, 500)
+  RAI_PARAM("LSZO/", double, dataRatio, 1.)
+  RAI_PARAM("LSZO/", bool, pruneData, false)
+  RAI_PARAM("LSZO/", bool, covariantNoise, false)
+  RAI_PARAM("LSZO/", double, stepInc, 1.5)
+  RAI_PARAM("LSZO/", double, stepDec, .5)
+
+  //-- state and data
   arr x; ///< point where P was last evaluated
-  double phi2_x=-1.;
-  arr phi_x, J_x, H_x; ///< features at x
   arr J;
-  arr
-
+  arr phi_x; //, J_x, H_x; ///< features at x
+  double phi2_x=-1.;
+  arr data_X, data_Phi;
 
-
+  uint steps=0, tinySteps=0, rejectedSteps=0;
+
+  LeastSquaredZeroOrder(shared_ptr<NLP> P, const arr& x_init, std::shared_ptr<OptOptions> _opt=make_shared<OptOptions>());
 
   shared_ptr<SolverReturn> solve(){
     while(!step()){}
     shared_ptr<SolverReturn> ret = make_shared<SolverReturn>();
     ret->x = x;
-
+    arr err = P->summarizeErrors(phi_x);
+    ret->f = err(OT_f);
+    ret->sos = err(OT_sos);
+    ret->eq = err(OT_eq);
+    ret->ineq = err(OT_ineq);
     ret->feasible=true;
     return ret;
   }
@@ -34,5 +53,7 @@ struct LeastSquaredZeroOrder{
 
   void updateJ_rank1(arr& J, const arr& x, const arr& x_last, const arr& phi, const arr& phi_last);
 
-  void updateJ_linReg(arr& J, const arr& Xraw, const arr& Y
+  void updateJ_linReg(arr& J, const arr& Xraw, const arr& Y);
 };
+
+} //namespace