robotic 0.3.4.dev1__cp311-cp311-manylinux2014_x86_64.whl → 0.3.4.dev2__cp311-cp311-manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- robotic/_robotic.pyi +32 -14
- robotic/_robotic.so +0 -0
- robotic/include/rai/Algo/RidgeRegression.h +1 -1
- robotic/include/rai/Algo/rungeKutta.h +1 -1
- robotic/include/rai/Core/array.h +30 -22
- robotic/include/rai/Core/array.ipp +53 -60
- robotic/include/rai/Core/arrayDouble.h +29 -25
- robotic/include/rai/DataGen/shapenetGrasps.h +1 -1
- robotic/include/rai/Geo/mesh.h +2 -2
- robotic/include/rai/Geo/pairCollision.h +40 -36
- robotic/include/rai/Geo/signedDistanceFunctions.h +5 -3
- robotic/include/rai/KOMO/komo.h +1 -1
- robotic/include/rai/Kin/dof_forceExchange.h +3 -3
- robotic/include/rai/Kin/feature.h +1 -1
- robotic/include/rai/Kin/frame.h +1 -1
- robotic/include/rai/Kin/proxy.h +1 -1
- robotic/include/rai/Optim/BayesOpt.h +14 -7
- robotic/include/rai/Optim/CMA/boundary_transformation.h +73 -0
- robotic/include/rai/Optim/CMA/cmaes.h +175 -0
- robotic/include/rai/Optim/CMA/cmaes_interface.h +68 -0
- robotic/include/rai/Optim/GlobalIterativeNewton.h +7 -3
- robotic/include/rai/Optim/NLP.h +15 -1
- robotic/include/rai/Optim/NLP_Solver.h +5 -5
- robotic/include/rai/Optim/constrained.h +3 -3
- robotic/include/rai/Optim/lagrangian.h +6 -5
- robotic/include/rai/Optim/m_EvoStrategies.h +94 -0
- robotic/include/rai/Optim/{gradient.h → m_Gradient.h} +12 -13
- robotic/include/rai/Optim/m_LBFGS.h +21 -0
- robotic/include/rai/Optim/m_LeastSquaresZeroOrder.h +18 -11
- robotic/include/rai/Optim/m_LocalGreedy.h +31 -0
- robotic/include/rai/Optim/m_NelderMead.h +17 -0
- robotic/include/rai/Optim/{newton.h → m_Newton.h} +8 -5
- robotic/include/rai/Optim/options.h +6 -7
- robotic/include/rai/Optim/primalDual.h +9 -5
- robotic/include/rai/Optim/testProblems_Opt.h +5 -5
- robotic/include/rai/Optim/utils.h +9 -20
- robotic/librai.so +0 -0
- robotic/meshTool +0 -0
- robotic/version.py +1 -1
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev2.dist-info}/METADATA +1 -1
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev2.dist-info}/RECORD +62 -56
- robotic/include/rai/Optim/lbfgs.h +0 -18
- /robotic/include/rai/Geo/{assimpInterface.h → i_assimp.h} +0 -0
- /robotic/include/rai/Geo/{fclInterface.h → i_fcl.h} +0 -0
- /robotic/include/rai/Kin/{kin_bullet.h → i_Bullet.h} +0 -0
- /robotic/include/rai/Kin/{kin_feather.h → i_Feather.h} +0 -0
- /robotic/include/rai/Kin/{kin_ode.h → i_Ode.h} +0 -0
- /robotic/include/rai/Kin/{kin_physx.h → i_Physx.h} +0 -0
- /robotic/include/rai/Optim/{opt-ceres.h → i_Ceres.h} +0 -0
- /robotic/include/rai/Optim/{opt-ipopt.h → i_Ipopt.h} +0 -0
- /robotic/include/rai/Optim/{opt-nlopt.h → i_NLopt.h} +0 -0
- /robotic/include/rai/Optim/{liblbfgs.h → liblbfgs/liblbfgs.h} +0 -0
- /robotic/include/rai/Optim/{SlackGaussNewton.h → m_SlackGaussNewton.h} +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev2.data}/scripts/ry-bot +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev2.data}/scripts/ry-h5info +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev2.data}/scripts/ry-info +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev2.data}/scripts/ry-meshTool +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev2.data}/scripts/ry-test +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev2.data}/scripts/ry-urdfConvert.py +0 -0
- {robotic-0.3.4.dev1.data → robotic-0.3.4.dev2.data}/scripts/ry-view +0 -0
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev2.dist-info}/WHEEL +0 -0
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev2.dist-info}/licenses/LICENSE +0 -0
- {robotic-0.3.4.dev1.dist-info → robotic-0.3.4.dev2.dist-info}/top_level.txt +0 -0
robotic/_robotic.pyi
CHANGED
@@ -1301,7 +1301,7 @@ class NLP_Solver:
         """
     def setInitialization(self, arg0: arr) -> NLP_Solver:
         ...
-    def setOptions(self, verbose: int = 1, stopTolerance: float = 0.01, stopFTolerance: float = -1.0, stopGTolerance: float = -1.0, stopEvals: int = 1000, stopInners: int = 1000, stopOuters: int = 1000,
+    def setOptions(self, verbose: int = 1, stopTolerance: float = 0.01, stopFTolerance: float = -1.0, stopGTolerance: float = -1.0, stopEvals: int = 1000, stopInners: int = 1000, stopOuters: int = 1000, stopLineSteps: int = 10, stopTinySteps: int = 4, stepInit: float = 1.0, stepMin: float = -1.0, stepMax: float = 0.2, stepInc: float = 1.5, stepDec: float = 0.5, damping: float = 1.0, wolfe: float = 0.01, muInit: float = 1.0, muInc: float = 5.0, muMax: float = 10000.0, muLBInit: float = 0.1, muLBDec: float = 0.2, lambdaMax: float = -1.0, interiorPadding: float = 0.01, boundedNewton: bool = True, finiteDifference: bool = False) -> NLP_Solver:
         """
         set solver options
         """

@@ -1457,19 +1457,21 @@ class OptMethod:
 
     none
 
-
+    GradientDescent
 
-
+    Rprop
 
     LBFGS
 
-
+    Newton
 
-
+    AugmentedLag
 
-
+    LogBarrier
 
-
+    slackGN_logBarrier
+
+    SquaredPenalty
 
     singleSquaredPenalty
 

@@ -1479,22 +1481,38 @@ class OptMethod:
 
     Ipopt
 
+    slackGN_Ipopt
+
     Ceres
+
+    LSZO
+
+    greedy
+
+    NelderMead
+
+    CMA
     """
+    AugmentedLag: typing.ClassVar[OptMethod] # value = <OptMethod.AugmentedLag: 5>
+    CMA: typing.ClassVar[OptMethod] # value = <OptMethod.CMA: 18>
     Ceres: typing.ClassVar[OptMethod] # value = <OptMethod.Ceres: 14>
+    GradientDescent: typing.ClassVar[OptMethod] # value = <OptMethod.GradientDescent: 1>
     Ipopt: typing.ClassVar[OptMethod] # value = <OptMethod.Ipopt: 12>
     LBFGS: typing.ClassVar[OptMethod] # value = <OptMethod.LBFGS: 3>
+    LSZO: typing.ClassVar[OptMethod] # value = <OptMethod.LSZO: 15>
+    LogBarrier: typing.ClassVar[OptMethod] # value = <OptMethod.LogBarrier: 6>
     NLopt: typing.ClassVar[OptMethod] # value = <OptMethod.NLopt: 11>
-
-
-
-
-
+    NelderMead: typing.ClassVar[OptMethod] # value = <OptMethod.NelderMead: 17>
+    Newton: typing.ClassVar[OptMethod] # value = <OptMethod.Newton: 4>
+    Rprop: typing.ClassVar[OptMethod] # value = <OptMethod.Rprop: 2>
+    SquaredPenalty: typing.ClassVar[OptMethod] # value = <OptMethod.SquaredPenalty: 8>
+    __members__: typing.ClassVar[dict[str, OptMethod]] # value = {'none': <OptMethod.none: 0>, 'GradientDescent': <OptMethod.GradientDescent: 1>, 'Rprop': <OptMethod.Rprop: 2>, 'LBFGS': <OptMethod.LBFGS: 3>, 'Newton': <OptMethod.Newton: 4>, 'AugmentedLag': <OptMethod.AugmentedLag: 5>, 'LogBarrier': <OptMethod.LogBarrier: 6>, 'slackGN_logBarrier': <OptMethod.slackGN_logBarrier: 7>, 'SquaredPenalty': <OptMethod.SquaredPenalty: 8>, 'singleSquaredPenalty': <OptMethod.singleSquaredPenalty: 9>, 'slackGN': <OptMethod.slackGN: 10>, 'NLopt': <OptMethod.NLopt: 11>, 'Ipopt': <OptMethod.Ipopt: 12>, 'slackGN_Ipopt': <OptMethod.slackGN_Ipopt: 13>, 'Ceres': <OptMethod.Ceres: 14>, 'LSZO': <OptMethod.LSZO: 15>, 'greedy': <OptMethod.greedy: 16>, 'NelderMead': <OptMethod.NelderMead: 17>, 'CMA': <OptMethod.CMA: 18>}
+    greedy: typing.ClassVar[OptMethod] # value = <OptMethod.greedy: 16>
     none: typing.ClassVar[OptMethod] # value = <OptMethod.none: 0>
-    rprop: typing.ClassVar[OptMethod] # value = <OptMethod.rprop: 2>
     singleSquaredPenalty: typing.ClassVar[OptMethod] # value = <OptMethod.singleSquaredPenalty: 9>
     slackGN: typing.ClassVar[OptMethod] # value = <OptMethod.slackGN: 10>
-
+    slackGN_Ipopt: typing.ClassVar[OptMethod] # value = <OptMethod.slackGN_Ipopt: 13>
+    slackGN_logBarrier: typing.ClassVar[OptMethod] # value = <OptMethod.slackGN_logBarrier: 7>
     @staticmethod
     def _pybind11_conduit_v1_(*args, **kwargs):
         ...
robotic/_robotic.so
CHANGED
Binary file

robotic/include/rai/Algo/RidgeRegression.h
CHANGED

@@ -74,7 +74,7 @@ struct KernelRidgeRegression {
   arr evaluate(const arr& X, arr& bayesSigma2=NoArr); ///< returns f(x) and \s^2(x) for a set of points X
 
   double evaluate(const arr& x, arr& df_x, arr& H, double plusSigma, bool onlySigma); ///< returns f(x) + coeff*\sigma(x) and its gradient and Hessian
-
+  ScalarFunction getF(double plusSigma);
 };
 
 struct KernelLogisticRegression {

robotic/include/rai/Algo/rungeKutta.h
CHANGED

@@ -14,7 +14,7 @@ namespace rai {
 
 //----- Runge-Kutta
 /// standard Runge-Kutta 4
-void rk4(arr& x, const arr& x0,
+void rk4(arr& x, const arr& x0, VectorFunction f, double dt);
 /// same for second order diff equation
 //void rk4dd(arr& x1, arr& v1, const arr& x0, const arr& v0,
 //           void (*ddf)(arr& xdd, const arr& x, const arr& v),
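For context on the rk4 hunk above: the integrator now takes the dynamics as a VectorFunction (the std::function typedef visible in the arrayDouble.h hunks further below) plus an explicit step size. The following is a minimal sketch of a call, not taken from the package; the include paths, the arr initializer-list constructor, and the scalar-multiplication idiom are assumptions about the rai headers shipped in this wheel.

#include <iostream>
#include <rai/Core/array.h>        // assumed include prefix for the wheel's rai headers
#include <rai/Algo/rungeKutta.h>

int main() {
  arr x, x0 = {1., .5};            // x receives the state after one step; x0 is the initial state
  // dynamics xdot = -x, passed as a lambda convertible to VectorFunction
  rai::rk4(x, x0, [](const arr& y) { return y*(-1.); }, .01);
  std::cout <<x <<std::endl;       // the state should have shrunk slightly toward zero
  return 0;
}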
robotic/include/rai/Core/array.h
CHANGED
@@ -67,7 +67,7 @@ template<class T> struct Array {
   uint N;           ///< number of elements
   uint nd;          ///< number of dimensions
   uint d0, d1, d2;  ///< 0th, 1st, 2nd dim
-  uint*
+  uint* _shape;     ///< pointer to dimensions (for nd<=3 points to d0)
   bool isReference; ///< true if this refers to memory of another array
   uint M;           ///< memory allocated (>=N)
   SpecialArray* special=0; ///< auxiliary data, e.g. if this is a sparse matrics, depends on special type

@@ -95,24 +95,30 @@ template<class T> struct Array {
   Array<T>& operator=(const Array<T>& a);
 
   /// @name iterators
-
-
-
-
-
-
-
-
-  }
-
-
-
-
-
-
-
-
+  using iterator = T*;
+  using const_iterator = const T*;
+  // struct iterator {
+  //   using reference = T&;
+  //   T* p;
+  //   T& operator()() { return *p; } //access to value by user
+  //   void operator++() { p++; }
+  //   reference operator*() { return *p; } //in for(auto& it:array.enumerated()) it is assigned to *iterator
+  //   friend bool operator!=(const iterator& i, const iterator& j) { return i.p!=j.p; }
+  //   friend long int operator-(const iterator& i, const iterator& j) { return (long int)(i.p-j.p); }
+  //   friend iterator operator+(const iterator& i, int j) { return iterator{i.p+j}; }
+  //   friend iterator operator-(const iterator& i, int j) { return iterator{i.p-j}; }
+  //   friend reference operator*(const iterator& i, int j) { return i.p[j]; }
+  //   T& operator->() { return *p; }
+  // };
+  // struct const_iterator {
+  //   using reference = const T&;
+  //   const T* p;
+  //   const T& operator()() { return *p; } //access to value by user
+  //   void operator++() { p++; }
+  //   reference operator*() { return *p; } //in for(auto& it:array.enumerated()) it is assigned to *iterator
+  //   friend bool operator!=(const const_iterator& i, const const_iterator& j) { return i.p!=j.p; }
+  //   const T& operator->() { return *p; }
+  // };
 
   iterator begin() { return iterator{p}; }
   const_iterator begin() const { return const_iterator{p}; }

@@ -304,7 +310,7 @@ template<class T> struct Array {
   void resizeMEM(uint n, bool copy, int Mforce=-1);
   void reserveMEM(uint Mforce) { resizeMEM(N, true, Mforce); if(!nd) nd=1; }
   void freeMEM();
-  void
+  void resetShape(uint* dim=0);
 
   /// @name serialization
   uint serial_size();

@@ -447,8 +453,8 @@ template<class T> Array<T> catCol(const rai::Array<T>& a, const rai::Array<T>& b
 namespace rai {
 template<class T, class S> void resizeAs(Array<T>& x, const Array<S>& a) {
   x.nd=a.nd; x.d0=a.d0; x.d1=a.d1; x.d2=a.d2;
-  x.
-  if(x.nd>3) { x.
+  x.resetShape();
+  if(x.nd>3) { x._shape=new uint[x.nd]; memmove(x._shape, a._shape, x.nd*sizeof(uint)); }
   x.resizeMEM(a.N, false);
 }
 template<class T, class S> void resizeCopyAs(Array<T>& x, const Array<S>& a);

@@ -679,6 +685,8 @@ namespace rai {
 uint product(const uintA& x);
 template<class T> T& min(const Array<T>& x);
 template<class T> T& max(const Array<T>& x);
+template<class T> std::tuple<T&,uint> min_arg(const Array<T>& x);
+template<class T> std::tuple<T&,uint> max_arg(const Array<T>& x);
 uint sum(const uintA& x);
 float sum(const floatA& x);
 template<class T> Array<T> integral(const Array<T>& x);
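A note on the two new declarations min_arg/max_arg in the hunk above: they return the extreme value together with its flat index in one std::tuple, instead of separate min()/argmin() calls. A minimal sketch of how that tuple unpacks; the arr literal, the include path, and the rai:: qualification are assumptions rather than documented usage.

#include <iostream>
#include <rai/Core/array.h>   // assumed include prefix for the wheel's rai headers

int main() {
  arr x = {3., 1., 4., 1., 5.};
  auto [value, index] = rai::min_arg(x);   // std::tuple<double&,uint>: minimum and its flat index
  std::cout <<"min " <<value <<" at index " <<index <<std::endl;
  return 0;
}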
robotic/include/rai/Core/array.ipp
CHANGED

@@ -41,7 +41,7 @@ template<class T> Array<T>::Array()
   N(0),
   nd(0),
   d0(0), d1(0), d2(0),
-
+  _shape(&d0),
   isReference(false),
   M(0),
   special(0) {

@@ -72,17 +72,18 @@ template<class T> Array<T>::Array(Array<T>&& a)
   N(a.N),
   nd(a.nd),
   d0(a.d0), d1(a.d1), d2(a.d2),
-
+  _shape(&d0),
   isReference(a.isReference),
   M(a.M),
   special(a.special) {
   if constexpr(std::is_same_v<T, double>){
     if(a.jac) jac = std::move(a.jac);
   }
-  // CHECK_EQ(a.
-  if(a.
+  // CHECK_EQ(a.shape, &a.d0, "NIY for larger tensors");
+  if(a._shape!=&a.d0) { _shape=a._shape; a._shape=&a.d0; }
   a.p=NULL;
   a.N=a.nd=a.d0=a.d1=a.d2=0;
+  a.resetShape();
   a.isReference=false;
   a.special=NULL;
 }

@@ -109,7 +110,7 @@ template<class T> Array<T>::~Array() {
   clear();
 #else //faster (leaves members non-zeroed..)
   if(special) { delete special; special=NULL; }
-  if(
+  if(_shape!=&d0) { delete[] _shape; }
   if(M) {
     globalMemoryTotal -= M*sizeT;
     if(memMove==1) free(p); else delete[] p;

@@ -125,57 +126,56 @@ template<class T> Array<T>& Array<T>::clear() {
 }
 
 /// resize 1D array, discard the previous contents
-template<class T> Array<T>& Array<T>::resize(uint D0) { nd=1; d0=D0;
+template<class T> Array<T>& Array<T>::resize(uint D0) { nd=1; d0=D0; resetShape(); resizeMEM(d0, false); return *this; }
 
 /// resize but copy the previous contents
-template<class T> Array<T>& Array<T>::resizeCopy(uint D0) { nd=1; d0=D0;
+template<class T> Array<T>& Array<T>::resizeCopy(uint D0) { nd=1; d0=D0; resetShape(); resizeMEM(d0, true); return *this; }
 
 /// reshape the dimensionality (e.g. from 2D to 1D); throw an error if this actually requires to resize the memory
 template<class T> Array<T>& Array<T>::reshape(int D0) {
   if(D0<0) D0=N;
   CHECK_EQ((int)N, D0, "reshape must preserve total memory size");
-  nd=1; d0=D0; d1=d2=0;
+  nd=1; d0=D0; d1=d2=0; resetShape();
   return *this;
 }
 
 /// same for 2D ...
-template<class T> Array<T>& Array<T>::resize(uint D0, uint D1) { nd=2; d0=D0; d1=D1;
+template<class T> Array<T>& Array<T>::resize(uint D0, uint D1) { nd=2; d0=D0; d1=D1; resetShape(); resizeMEM(d0*d1, false); return *this; }
 
 /// ...
-template<class T> Array<T>& Array<T>::resizeCopy(uint D0, uint D1) { nd=2; d0=D0; d1=D1;
+template<class T> Array<T>& Array<T>::resizeCopy(uint D0, uint D1) { nd=2; d0=D0; d1=D1; resetShape(); resizeMEM(d0*d1, true); return *this; }
 
 /// ...
 template<class T> Array<T>& Array<T>::reshape(int D0, int D1) {
   if(D0<0) D0=N/D1; else if(D1<0) D1=N/D0;
   CHECK_EQ((int)N, D0*D1, "reshape must preserve total memory size");
   nd=2; d0=D0; d1=D1; d2=0;
-
+  resetShape();
   return *this;
 }
 
 /// same for 3D ...
-template<class T> Array<T>& Array<T>::resize(uint D0, uint D1, uint D2) { nd=3; d0=D0; d1=D1; d2=D2;
+template<class T> Array<T>& Array<T>::resize(uint D0, uint D1, uint D2) { nd=3; d0=D0; d1=D1; d2=D2; resetShape(); resizeMEM(d0*d1*d2, false); return *this; }
 
 /// ...
-template<class T> Array<T>& Array<T>::resizeCopy(uint D0, uint D1, uint D2) { nd=3; d0=D0; d1=D1; d2=D2;
+template<class T> Array<T>& Array<T>::resizeCopy(uint D0, uint D1, uint D2) { nd=3; d0=D0; d1=D1; d2=D2; resetShape(); resizeMEM(d0*d1*d2, true); return *this; }
 
 /// ...
 template<class T> Array<T>& Array<T>::reshape(int D0, int D1, int D2) {
   if(D0<0) D0=N/(D1*D2); else if(D1<0) D1=N/(D0*D2); else if(D2<0) D2=N/(D0*D1);
   CHECK_EQ((int)N, D0*D1*D2, "reshape must preserve total memory size");
   nd=3; d0=D0; d1=D1; d2=D2;
-
+  resetShape();
   return *this;
 }
 
 /// resize to multi-dimensional tensor
 template<class T> Array<T>& Array<T>::resize(uint ND, uint* dim) {
-  nd=ND; d0=d1=d2=0;
-
-
-
-
-  for(S=1, j=0; j<nd; j++) S*=dim[j];
+  nd=ND; d0=d1=d2=0;
+  if(nd>0){ d0=dim[0]; if(nd>1){ d1=dim[1]; if(nd>2){ d2=dim[2]; } } }
+  resetShape(dim);
+  uint64_t S=(nd>0?1:0);
+  for(uint j=0; j<nd; j++) S*=dim[j];
   if(S>=(1ull <<32)) HALT("Array #elements " <<(S>>30) <<"G is >= 2^32");
   resizeMEM((uint)S, false);
   return *this;

@@ -183,12 +183,11 @@ template<class T> Array<T>& Array<T>::resize(uint ND, uint* dim) {
 
 /// resize to multi-dimensional tensor
 template<class T> Array<T>& Array<T>::resizeCopy(uint ND, uint* dim) {
-  nd=ND; d0=d1=d2=0;
-
-
-
-
-  for(S=1, j=0; j<nd; j++) S*=dim[j];
+  nd=ND; d0=d1=d2=0;
+  if(nd>0){ d0=dim[0]; if(nd>1){ d1=dim[1]; if(nd>2){ d2=dim[2]; } } }
+  resetShape(dim);
+  uint64_t S=(nd>0?1:0);
+  for(uint j=0; j<nd; j++) S*=dim[j];
   if(S>=(1ull <<32)) HALT("Array #elements " <<(S>>30) <<"G is >= 2^32");
   resizeMEM((uint)S, true);
   return *this;

@@ -196,19 +195,10 @@ template<class T> Array<T>& Array<T>::resizeCopy(uint ND, uint* dim) {
 
 /// resize to multi-dimensional tensor
 template<class T> Array<T>& Array<T>::reshape(uint ND, uint* dim) {
-  nd=ND; d0=d1=d2=0;
-  if(nd>0){
-
-
-    d1=dim[1];
-    if(nd>2){
-      d2=dim[2];
-      if(nd>3) { d=new uint[nd]; memmove(d, dim, nd*sizeof(uint)); }
-    }
-  }
-  //for(uint j=0; j<nd && j<3; j++) {(&d0)[j]=dim[j]; }
-  uint S=(nd>0?1:0);
+  nd=ND; d0=d1=d2=0;
+  if(nd>0){ d0=dim[0]; if(nd>1){ d1=dim[1]; if(nd>2){ d2=dim[2]; } } }
+  resetShape(dim);
+  uint64_t S=(nd>0?1:0);
   for(uint j=0; j<nd; j++) S*=dim[j];
   CHECK_EQ(N, S, "reshape must preserve total memory size");
   return *this;

@@ -229,8 +219,7 @@ template<class T> Array<T>& Array<T>::resizeAs(const Array<T>& a) {
   CHECK(this!=&a, "never do this!!!");
   if(isReference) CHECK_EQ(N, a.N, "resize of a reference (e.g. subarray) is not allowed! (only a resize without changing memory size)");
   nd=a.nd; d0=a.d0; d1=a.d1; d2=a.d2;
-
-  if(nd>3) { d=new uint[nd]; memmove(d, a.d, nd*sizeof(uint)); }
+  resetShape(a._shape);
   resizeMEM(a.N, false);
   return *this;
 }

@@ -238,8 +227,8 @@ template<class T> Array<T>& Array<T>::resizeAs(const Array<T>& a) {
 /// make it the same size as \c a and copy previous content
 template<class T> Array<T>& Array<T>::resizeCopyAs(const Array<T>& a) {
   CHECK(this!=&a, "never do this!!!");
-  nd=a.nd; d0=a.d0; d1=a.d1; d2=a.d2;
-
+  nd=a.nd; d0=a.d0; d1=a.d1; d2=a.d2;
+  resetShape(a._shape);
   resizeMEM(a.N, true);
   return *this;
 }

@@ -247,15 +236,15 @@ template<class T> Array<T>& Array<T>::resizeCopyAs(const Array<T>& a) {
 template<class T> Array<T>& Array<T>::reshapeAs(const Array<T>& a) {
   CHECK(this!=&a, "never do this!!!");
   CHECK_EQ(N, a.N, "reshape must preserve total memory size");
-  nd=a.nd; d0=a.d0; d1=a.d1; d2=a.d2;
-
+  nd=a.nd; d0=a.d0; d1=a.d1; d2=a.d2;
+  resetShape(a._shape);
   return *this;
 }
 
 /// return the k-th dimensionality
 template<class T> uint Array<T>::dim(uint k) const {
   CHECK(k<nd, "dimensionality range check error: " <<k <<"!<" <<nd);
-  if(!
+  if(!_shape && k<3) return (&d0)[k]; else return _shape[k];
 }
 
 #ifdef RAI_CLANG

@@ -363,10 +352,9 @@ template<class T> void Array<T>::freeMEM() {
     M=0;
   }
 #endif
-  if(
+  if(_shape!=&d0) { delete[] _shape; _shape=&d0; }
   p=NULL;
   N=nd=d0=d1=d2=0;
-  d=&d0;
   isReference=false;
 }
 

@@ -386,9 +374,13 @@ template<class T> Array<T>& Array<T>::dereference() {
 }
 
 /// reset the dimensionality pointer d to point to &d0
-template<class T> void Array<T>::
-  if(
-
+template<class T> void Array<T>::resetShape(uint* dim) {
+  if(_shape!=&d0) { delete[] _shape; _shape=&d0; }
+  if(nd>3) {
+    CHECK(dim, "need shape tuple");
+    _shape=new uint[nd];
+    memmove(_shape, dim, nd*sizeof(uint));
+  }
 }
 
 //***** append, insert & remove

@@ -688,7 +680,7 @@ template<class T> void Array<T>::resizeDim(uint k, uint dk) {
 /// return a uint-Array that contains (acutally refers to) the dimensions of 'this'
 template<class T> Array<uint> Array<T>::dim() const {
   Array<uint> dims;
-  dims.setCarray(
+  dims.setCarray(_shape, nd);
   return dims;
 }
 

@@ -1340,8 +1332,8 @@ template<class T> void Array<T>::referToDim(const Array<T>& a, int i) {
   } else if(a.nd>3) {
     uint n=a.N/a.d0;
     referTo(a.p+i*n, n);
-    nd=a.nd-1; d0=a.d1; d1=a.d2; d2=a.
-
+    nd=a.nd-1; d0=a.d1; d1=a.d2; d2=a._shape[3];
+    resetShape(a._shape+1);
   }
 }
 

@@ -1363,14 +1355,14 @@ template<class T> void Array<T>::referToDim(const Array<T>& a, uint i, uint j, u
   CHECK(i<a.d0 && j<a.d1 && k<a.d2, "SubDim range error (" <<i <<"<" <<a.d0 <<", " <<j <<"<" <<a.d1 <<", " <<k <<"<" <<a.d2 << ")");
 
   if(a.nd==4) {
-    referTo(&a(i, j, k), a.
+    referTo(&a(i, j, k), a._shape[3]);
   } else if(a.nd==5) {
     NIY;
     // nd=2; d0=a.d[3]; d1=a.d[4]; d2=0; N=d0*d1;
   } else if(a.nd>5) {
     NIY;
     // nd=a.nd-3; d0=a.d[3]; d1=a.d[4]; d2=a.d[5]; N=a.N/(a.d0*a.d1*a.d2);
-    //
+    // resetShape();
     // if(nd>3) { d=new uint[nd]; memmove(d, a.d+3, nd*sizeof(uint)); }
   }
   // p=a.p+(i*a.N+(j*a.N+(k*a.N/a.d2))/a.d1)/a.d0;

@@ -1383,6 +1375,7 @@ template<class T> void Array<T>::takeOver(Array<T>& a) {
   freeMEM();
   memMove=a.memMove;
   N=a.N; nd=a.nd; d0=a.d0; d1=a.d1; d2=a.d2;
+  resetShape(a._shape);
   p=a.p; M=a.M;
   special=a.special;
 #if 0 //a remains reference on this

@@ -1391,7 +1384,7 @@ template<class T> void Array<T>::takeOver(Array<T>& a) {
 #else //a is cleared
   a.p=NULL;
   a.M=a.N=a.nd=a.d0=a.d1=a.d2=0;
-
+  a.resetShape();
   a.special=0;
   a.isReference=false;
 #endif

@@ -1611,10 +1604,10 @@ template<class T> void Array<T>::write(std::ostream& os, const char* ELEMSEP, co
     }
   }
   if(nd>3) {
-    CHECK(
+    CHECK(_shape && _shape!=&d0, "");
     for(i=0; i<N; i++) {
-      if(i && !(i%
-      if(nd>1 && !(i%(
+      if(i && !(i%_shape[nd-1])) os <<LINESEP;
+      if(nd>1 && !(i%(_shape[nd-2]*_shape[nd-1]))) os <<LINESEP;
       os <<(i?ELEMSEP:"") <<elem(i);
     }
   }
robotic/include/rai/Core/arrayDouble.h
CHANGED

@@ -284,22 +284,23 @@ arr& getNoArr();
 /// @{
 
 /// a generic vector-valued function \f$f:~x\mapsto y\in\mathbb{R}^d\f$, where return value may have Jacobian attached
-typedef std::function<arr(const arr& x)> fct;
+// typedef std::function<arr(const arr& x)> fct;
 typedef std::function<arr(const arr& x)> VectorFunction;
-
-
-
-
-
-
-
-};
-
-
-
-
-
-}
+typedef std::function<double(arr& g, arr& H, const arr& x)> ScalarFunction;
+
+// /// a scalar function \f$f:~x\mapsto y\in\mathbb{R}\f$ with optional gradient and hessian
+// struct ScalarFunction {
+//   uint dim;
+//   virtual double f(arr& g, arr& H, const arr& x) = 0;
+//   virtual ~ScalarFunction() {}
+//   std::function<double(const arr& x)> cfunc(){ return [this](const arr& x){ return this->f(NoArr, NoArr, x); }; }
+// };
+
+// struct Conv_cfunc2ScalarFunction : ScalarFunction {
+//   std::function<double(arr& g, arr& H, const arr& x)> cfunc;
+//   Conv_cfunc2ScalarFunction(std::function<double(arr& g, arr& H, const arr& x)> _cfunc) : cfunc(_cfunc) {}
+//   double f(arr& g, arr& H, const arr& x){ return cfunc(g, H, x); }
+// };
 
 /// a kernel function
 struct KernelFunction {

@@ -370,6 +371,7 @@ inline arr range(double lo, double hi, uint steps) { return rai::grid(1, lo, hi,
 //inline uintA range(uint n) { uintA r; r.setStraightPerm(n); return r; }
 
 arr repmat(const arr& A, uint m, uint n);
+arr match(const arr& A, const uintA& shape);
 
 //inline uintA randperm(uint n) { uintA z; z.setRandomPerm(n); return z; }
 inline arr linspace(double base, double limit, uint n) { return rai::grid(1, base, limit, n).reshape(-1); }

@@ -437,11 +439,13 @@ arr reshapeColor(const arr& col, int d0=-1);
 
 void scanArrFile(const char* name);
 
-arr
-arr
-
-
-bool
+arr finiteDifference_gradient(ScalarFunction f, const arr& x0, double y0, double eps=1e-8);
+arr finiteDifference_jacobian(VectorFunction f, const arr& x0, const arr& y0, double eps=1e-8);
+// arr finiteDifferenceGradient(ScalarFunction f, const arr& x, arr& Janalytic=NoArr, double eps=1e-8);
+// arr finiteDifferenceJacobian(VectorFunction f, const arr& _x, arr& Janalytic=NoArr, double eps=1e-8);
+bool checkGradient(ScalarFunction f, const arr& x, double tolerance, bool verbose=false);
+bool checkHessian(ScalarFunction f, const arr& x, double tolerance, bool verbose=false);
+bool checkJacobian(VectorFunction f, const arr& x, double tolerance, bool verbose=false, const StringA& featureNames= {});
 void boundClip(arr& y, const arr& bounds);
 bool boundCheck(const arr& x, const arr& bounds, double eps=1e-3, bool verbose=true);
 

@@ -494,8 +498,8 @@ double euclideanDistance(const arr& v, const arr& w);
 double metricDistance(const arr& g, const arr& v, const arr& w);
 
 //min max
-arr max(const arr& v, uint
-arr min(const arr& v, uint
+arr max(const arr& v, uint axis);
+arr min(const arr& v, uint axis);
 uint argmin(const arr& x);
 uint argmax(const arr& x);
 void argmax(uint& i, uint& j, const arr& x);

@@ -505,7 +509,7 @@ double absMax(const arr& x);
 double absMin(const arr& x);
 
 double sum(const arr& v);
-arr sum(const arr& v, uint
+arr sum(const arr& v, uint axis);
 double sumOfAbs(const arr& v);
 double sumOfPos(const arr& v);
 double sumOfSqr(const arr& v);

@@ -514,9 +518,9 @@ double product(const arr& v);
 
 double trace(const arr& v);
 double var(const arr& v);
-arr mean(const arr& v);
+arr mean(const arr& v, uint axis=0);
 arr covar(const arr& X);
-arr
+arr vardiag(const arr& X);
 void clip(const arr& x, double lo, double hi);
 
 void op_transpose(arr& x, const arr& y);
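As a usage note on the arrayDouble.h hunks above: ScalarFunction is now a plain std::function typedef rather than a struct, so an objective is just a lambda, and checkGradient/checkHessian take it directly. The following is a minimal sketch under that typedef; the global scope of the typedefs, the include path, the !!g "was a real array passed?" idiom, and the eye() helper are assumptions based on other rai headers, not documented API of this release.

#include <iostream>
#include <rai/Core/array.h>   // assumed include prefix for the wheel's rai headers

int main() {
  // f(x) = x^T x with analytic gradient and Hessian, matching
  // ScalarFunction = std::function<double(arr& g, arr& H, const arr& x)>
  ScalarFunction f = [](arr& g, arr& H, const arr& x) -> double {
    if(!!g) g = 2.*x;           // fill the gradient only if the caller passed a real array (not NoArr)
    if(!!H) H = 2.*eye(x.N);    // same for the Hessian
    return sumOfSqr(x);
  };

  arr x = {1., -2., .5};
  bool ok = checkGradient(f, x, 1e-4, true);   // finite-difference check of the analytic gradient
  std::cout <<(ok ? "gradient ok" : "gradient mismatch") <<std::endl;
  return 0;
}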
robotic/include/rai/Geo/mesh.h
CHANGED
@@ -68,11 +68,11 @@ struct Mesh {
   void setCapsule(double r, double l, uint fineness=2);
   void setSSBox(double x_width, double y_width, double z_height, double r, uint fineness=2);
   void setSSCvx(const arr& core, double r, uint fineness=2);
-  void setImplicitSurface(
+  void setImplicitSurface(ScalarFunction f, double lo=-10., double up=+10., uint res=100);
   void setImplicitSurface(std::function<double(const arr& x)> f, const arr& bounds, uint res);
   void setImplicitSurface(const arr& gridValues, const arr& size);
   void setImplicitSurface(const floatA& gridValues, const arr& size);
-  void setImplicitSurfaceBySphereProjection(ScalarFunction
+  void setImplicitSurfaceBySphereProjection(ScalarFunction f, double rad, uint fineness=3);
   Mesh& setRandom(uint vertices=10);
   void setGrid(uint X, uint Y);
 
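Closing the mesh.h hunk above: setImplicitSurface now accepts the ScalarFunction typedef directly. A minimal sketch that meshes the unit sphere from its signed distance function; the rai::Mesh qualification, the length() helper, the V vertex member, and the include path are assumptions from the shipped Geo headers rather than documented usage.

#include <iostream>
#include <rai/Geo/mesh.h>   // assumed include prefix for the wheel's rai headers

int main() {
  rai::Mesh M;

  // signed distance to the unit sphere; gradient only filled when requested
  ScalarFunction sdf = [](arr& g, arr& H, const arr& x) -> double {
    double l = length(x);
    if(!!g) g = x/l;
    return l - 1.;
  };

  M.setImplicitSurface(sdf, -2., 2., 50);        // grid the box [-2,2]^3 at resolution 50 (new overload above)
  std::cout <<"vertices: " <<M.V.d0 <<std::endl; // V holds one 3D vertex per row
  return 0;
}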