circe 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/circe.gemspec +24 -0
- data/data/face_detection_yunet_2022mar.onnx +0 -0
- data/data/yolov5s.onnx +0 -0
- data/ext/camera_model.h +560 -0
- data/ext/circe.cpp +404 -0
- data/ext/extconf.rb +15 -0
- data/ext/yolo.cpp +94 -0
- data/ext/yolo.h +48 -0
- data/ext/yunet.cpp +132 -0
- data/ext/yunet.h +38 -0
- data/lib/circe/version.rb +3 -0
- data/lib/circe.rb +13 -0
- metadata +60 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA256:
|
3
|
+
metadata.gz: 683391e9186e4a20233cb25ce28f37b7bf56c88586b1ac72f1312081e4e48c7d
|
4
|
+
data.tar.gz: 680a92f2318df871cdd2f6f98732f85adab5098e067fe7c4fa986832e5f32caf
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: 84422ff365869d1615c4cf17bbc035426e7a4cbfc5bd501104d5e1b2a5ea12535150de13e192b0a2849102fb0a49f7e174b2f9b804027614fc50f68858d139de
|
7
|
+
data.tar.gz: d679d1b61f155a5170426d007e8d06e73b5e01c12fd4dc12ab70ceddb29fc1c80a35ab178b83986c32f0f05e1b508d9e6cf56ce8fd4840bacbf1d67f96dc6312
|
data/circe.gemspec
ADDED
@@ -0,0 +1,24 @@
|
|
1
|
+
# Gem specification for the `circe` gem: Ruby bindings for face and
# object recognition (YuNet / YOLOv5 via OpenCV), built as a native
# C++ extension (see ext/extconf.rb).
require_relative 'lib/circe/version'

Gem::Specification.new do |s|
    s.name        = 'circe'
    s.version     = Circe::VERSION
    s.summary     = "Face and object recognition"
    s.description = <<~EOF

      Face and object recognition

    EOF

    s.homepage    = 'https://github.com/sdalu/ruby-circe'
    s.license     = 'MIT'

    s.authors     = [ "Stéphane D'Alu" ]
    s.email       = [ 'sdalu@sdalu.com' ]

    # Native extension entry point; rubygems runs extconf.rb at install time.
    s.extensions  = [ 'ext/extconf.rb' ]
    # Packaged files: the spec itself, C++ sources/headers, Ruby sources,
    # and the bundled ONNX model files (yunet / yolov5s).
    s.files       = %w[ circe.gemspec ] +
                    Dir['ext/**/*.{cpp,h,rb}'] +
                    Dir['lib/**/*.rb'        ] +
                    Dir['data/*.onnx'        ]
end
|
Binary file
|
data/data/yolov5s.onnx
ADDED
Binary file
|
data/ext/camera_model.h
ADDED
@@ -0,0 +1,560 @@
|
|
1
|
+
/* Copyright 2021 iwatake2222
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef CAMERA_MODEL_
|
16
|
+
#define CAMERA_MODEL_
|
17
|
+
|
18
|
+
/*** Include ***/
|
19
|
+
#include <cstdio>
|
20
|
+
#include <cstdlib>
|
21
|
+
#include <cstring>
|
22
|
+
#define _USE_MATH_DEFINES
|
23
|
+
#include <cmath>
|
24
|
+
#include <string>
|
25
|
+
#include <vector>
|
26
|
+
#include <array>
|
27
|
+
|
28
|
+
#include <opencv2/opencv.hpp>
|
29
|
+
#ifdef _OPENMP
|
30
|
+
#include <omp.h>
|
31
|
+
#endif
|
32
|
+
|
33
|
+
#ifndef M_PI
|
34
|
+
#define M_PI 3.141592653f
|
35
|
+
#endif
|
36
|
+
|
37
|
+
/// Convert degrees to radians.
static inline float Deg2Rad(float deg) { return static_cast<float>(deg * M_PI / 180.0); }
/// Convert radians to degrees.
static inline float Rad2Deg(float rad) { return static_cast<float>(rad * 180.0 / M_PI); }

/// Compute the focal length [px] from an image dimension [px] and the
/// corresponding field of view [deg].
///
/// Derivation: (w/2) / f = tan(fov/2)  =>  f = (w/2) / tan(fov/2)
///
/// @param image_size  image width (for horizontal FoV) or height (for vertical FoV) in pixels
/// @param fov         field of view in degrees (0 < fov < 180)
/// @return            focal length in pixels
static inline float FocalLength(int32_t image_size, float fov)
{
    /* (w/2) / f = tan(fov/2) */
    /* image_size / 2.0f: use float division so an odd image size does not
     * lose the half-pixel to integer truncation (e.g. 1001 -> 500.5, not 500) */
    return (image_size / 2.0f) / std::tan(Deg2Rad(fov / 2));
}
|
44
|
+
|
45
|
+
class CameraModel {
    /***
     * Pinhole camera model:
     *   s[x, y, 1] = K * [R t] * [Mw, 1]
     *     K:     camera intrinsic parameters
     *     [R t]: camera extrinsic parameters
     *     R:     rotation matrix of the camera in world coordinates (camera attitude)
     *     t:     vector, in camera coordinates, from the camera position (Oc)
     *            to the world origin (Ow) = Ow - Oc
     *            = -RT   (T = vector from Ow to Oc in world coordinates)
     *     Mw:    coordinates of the target object in world coordinates (Xw, Yw, Zw)
     *   s[x, y, 1] = K * Mc
     *     Mc:    coordinates of the target object in camera coordinates (Xc, Yc, Zc)
     *            = [R t] * [Mw, 1]
     *
     *   Derivation: Mc = R(Mw - T) = RMw - RT = RMw + t   (t = -RT)
     *
     *   Note 1: t is a vector in camera coordinates. Therefore, whenever R is
     *           changed, t must be recomputed.
     *   Note 2: right-handed coordinate system: X+ = right, Y+ = down,
     *           Z+ = forward (e.g. an object above the camera has negative Yc).
     ***/

public:
    /*** Intrinsic parameters ***/
    /* float, 3 x 3 */
    cv::Mat K;
    cv::Mat K_new;       /* optimal new camera matrix derived from K and dist_coeff */

    int32_t width;       /* image width  [px] */
    int32_t height;      /* image height [px] */

    /* float, 5 x 1 */
    /* NOTE(review): treated elsewhere in this class as OpenCV layout
     * (k1, k2, p1, p2, k3) — confirm, see distortion code below. */
    cv::Mat dist_coeff;

    /*** Extrinsic parameters ***/
    /* float, 3 x 1, pitch(rx), yaw(ry), roll(rz) [rad] */
    cv::Mat rvec;

    /* float, 3 x 1, (tx, ty, tz): horizontal, vertical, depth (Camera location: Ow - Oc in camera coordinate) */
    cv::Mat tvec;

public:
    /* Initialize with default parameters: 1280x720, f = 500 px,
     * no distortion, camera at world origin with no rotation. */
    CameraModel() {
        /* Default Parameters */
        SetIntrinsic(1280, 720, 500.0f);
        SetDist({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f });
        //SetDist({ -0.1f, 0.01f, -0.005f, -0.001f, 0.0f });
        SetExtrinsic({ 0, 0, 0 }, { 0, 0, 0 });
    }

    /*** Accessor for camera parameters ***/
    /* Mutable references into rvec / tvec / K (flat float indexing). */
    float& rx() { return rvec.at<float>(0); }   /* pitch [rad] */
    float& ry() { return rvec.at<float>(1); }   /* yaw   [rad] */
    float& rz() { return rvec.at<float>(2); }   /* roll  [rad] */
    float& tx() { return tvec.at<float>(0); }
    float& ty() { return tvec.at<float>(1); }
    float& tz() { return tvec.at<float>(2); }
    float& fx() { return K.at<float>(0); }      /* K(0,0) */
    float& cx() { return K.at<float>(2); }      /* K(0,2) */
    float& fy() { return K.at<float>(4); }      /* K(1,1) */
    float& cy() { return K.at<float>(5); }      /* K(1,2) */


    /*** Methods for camera parameters ***/
    /* Set image size and focal length; principal point is assumed to be
     * the image center. Refreshes K_new. */
    void SetIntrinsic(int32_t width, int32_t height, float focal_length) {
        this->width = width;
        this->height = height;
        this->K = (cv::Mat_<float>(3, 3) <<
            focal_length, 0, width / 2.f,
            0, focal_length, height / 2.f,
            0, 0, 1);
        UpdateNewCameraMatrix();
    }

    /* Set the 5 distortion coefficients and refresh K_new. */
    void SetDist(const std::array<float, 5>& dist) {
        this->dist_coeff = (cv::Mat_<float>(5, 1) << dist[0], dist[1], dist[2], dist[3], dist[4]);
        UpdateNewCameraMatrix();
    }

    /* Recompute K_new from K + dist_coeff (no-op until both are set,
     * e.g. during construction). */
    void UpdateNewCameraMatrix()
    {
        if (!this->K.empty() && !this->dist_coeff.empty()) {
            this->K_new = cv::getOptimalNewCameraMatrix(this->K, this->dist_coeff, cv::Size(this->width, this->height), 0.0);
        }
    }

    /* Set rotation (degrees) and translation.
     * is_t_on_world == true:  tvec argument is T (Oc - Ow in world coordinates)
     *                         and is converted to camera-frame t = -RT.
     * is_t_on_world == false: tvec argument is already t (Ow - Oc in camera coordinates). */
    void SetExtrinsic(const std::array<float, 3>& rvec_deg, const std::array<float, 3>& tvec, bool is_t_on_world = true)
    {
        this->rvec = (cv::Mat_<float>(3, 1) << Deg2Rad(rvec_deg[0]), Deg2Rad(rvec_deg[1]), Deg2Rad(rvec_deg[2]));
        this->tvec = (cv::Mat_<float>(3, 1) << tvec[0], tvec[1], tvec[2]);

        /*
        is_t_on_world == true: tvec = T (Oc - Ow in world coordinate)
        is_t_on_world == false: tvec = tvec (Ow - Oc in camera coordinate)
        */
        if (is_t_on_world) {
            cv::Mat R = MakeRotationMat(Rad2Deg(rx()), Rad2Deg(ry()), Rad2Deg(rz()));
            this->tvec = -R * this->tvec;    /* t = -RT */
        }
    }

    /* Retrieve rotation (degrees) and translation, converting t back to the
     * world frame when is_t_on_world is true.
     * NOTE(review): the first tvec assignment below is overwritten by both
     * branches of the if — it appears redundant. */
    void GetExtrinsic(std::array<float, 3>& rvec_deg, std::array<float, 3>& tvec, bool is_t_on_world = true)
    {
        rvec_deg = { Rad2Deg(this->rvec.at<float>(0)), Rad2Deg(this->rvec.at<float>(1)) , Rad2Deg(this->rvec.at<float>(2)) };
        tvec = { this->tvec.at<float>(0), this->tvec.at<float>(1), this->tvec.at<float>(2) };
        /*
        is_t_on_world == true: tvec = T (Oc - Ow in world coordinate)
        is_t_on_world == false: tvec = tvec (Ow - Oc in camera coordinate)
        */
        if (is_t_on_world) {
            /* t = -RT -> T = -R^1 t */
            cv::Mat R = MakeRotationMat(Rad2Deg(rx()), Rad2Deg(ry()), Rad2Deg(rz()));
            cv::Mat R_inv;
            cv::invert(R, R_inv);
            cv::Mat T = -R_inv * this->tvec;
            tvec = { T.at<float>(0), T.at<float>(1), T.at<float>(2) };
        } else {
            tvec = { this->tvec.at<float>(0), this->tvec.at<float>(1), this->tvec.at<float>(2) };
        }

    }

    /* Set the camera position (Oc - Ow); converted into camera-frame t. */
    void SetCameraPos(float tx, float ty, float tz, bool is_on_world = true)    /* Oc - Ow */
    {
        this->tvec = (cv::Mat_<float>(3, 1) << tx, ty, tz);
        if (is_on_world) {
            cv::Mat R = MakeRotationMat(Rad2Deg(rx()), Rad2Deg(ry()), Rad2Deg(rz()));
            this->tvec = -R * this->tvec;    /* t = -RT */
        } else {
            /* Oc - Ow -> Ow - Oc */
            this->tvec *= -1;
        }
    }

    /* Translate the camera by a delta (Oc - Ow convention, like SetCameraPos). */
    void MoveCameraPos(float dtx, float dty, float dtz, bool is_on_world = true)    /* Oc - Ow */
    {
        cv::Mat tvec_delta = (cv::Mat_<float>(3, 1) << dtx, dty, dtz);
        if (is_on_world) {
            cv::Mat R = MakeRotationMat(Rad2Deg(rx()), Rad2Deg(ry()), Rad2Deg(rz()));
            tvec_delta = -R * tvec_delta;
        } else {
            /* Oc - Ow -> Ow - Oc */
            tvec_delta *= -1;
        }
        this->tvec += tvec_delta;
    }

    /* Set the camera attitude in degrees, keeping the camera position fixed. */
    void SetCameraAngle(float pitch_deg, float yaw_deg, float roll_deg)
    {
        /* t vec is vector in camera coordinate, so need to re-calculate it when rvec is updated */
        cv::Mat R_old = MakeRotationMat(Rad2Deg(rx()), Rad2Deg(ry()), Rad2Deg(rz()));
        cv::Mat T = -R_old.inv() * this->tvec;    /* T is tvec in world coordinate. t = -RT */
        this->rvec = (cv::Mat_<float>(3, 1) << Deg2Rad(pitch_deg), Deg2Rad(yaw_deg), Deg2Rad(roll_deg));
        cv::Mat R_new = MakeRotationMat(Rad2Deg(rx()), Rad2Deg(ry()), Rad2Deg(rz()));
        this->tvec = -R_new * T;    /* t = -RT */
    }

    /* Rotate the camera attitude by a delta (degrees), keeping position fixed. */
    void RotateCameraAngle(float dpitch_deg, float dyaw_deg, float droll_deg)
    {
        /* t vec is vector in camera coordinate, so need to re-calculate it when rvec is updated */
        cv::Mat R_old = MakeRotationMat(Rad2Deg(rx()), Rad2Deg(ry()), Rad2Deg(rz()));
        cv::Mat T = -R_old.inv() * this->tvec;    /* T is tvec in world coordinate. t = -RT */
        cv::Mat R_delta = MakeRotationMat(dpitch_deg, dyaw_deg, droll_deg);
        cv::Mat R_new = R_delta * R_old;
        this->tvec = -R_new * T;    /* t = -RT */
        cv::Rodrigues(R_new, this->rvec);    /* Rotation matrix -> rvec */
    }

    /*** Methods for projection ***/
    /* Project a single world point to image coordinates (convenience wrapper). */
    void ConvertWorld2Image(const cv::Point3f& object_point, cv::Point2f& image_point)
    {
        std::vector<cv::Point3f> object_point_list = { object_point };
        std::vector<cv::Point2f> image_point_list;
        ConvertWorld2Image(object_point_list, image_point_list);
        image_point = image_point_list[0];
    }

    /* Project world points to image coordinates, applying lens distortion.
     * Points behind the camera (Zc <= 0) are mapped to (-1, -1). */
    void ConvertWorld2Image(const std::vector<cv::Point3f>& object_point_list, std::vector<cv::Point2f>& image_point_list)
    {
        /*** Mw -> Image ***/
        /* the followings get exactly the same result */
#if 1
        /*** Projection ***/
        /* s[x, y, 1] = K * [R t] * [M, 1] = K * M_from_cam */
        cv::Mat K = this->K;
        cv::Mat R = MakeRotationMat(Rad2Deg(this->rx()), Rad2Deg(this->ry()), Rad2Deg(this->rz()));
        cv::Mat Rt = (cv::Mat_<float>(3, 4) <<
            R.at<float>(0), R.at<float>(1), R.at<float>(2), this->tx(),
            R.at<float>(3), R.at<float>(4), R.at<float>(5), this->ty(),
            R.at<float>(6), R.at<float>(7), R.at<float>(8), this->tz());

        image_point_list.resize(object_point_list.size());

#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int32_t i = 0; i < object_point_list.size(); i++) {
            const auto& object_point = object_point_list[i];
            auto& image_point = image_point_list[i];
            cv::Mat Mw = (cv::Mat_<float>(4, 1) << object_point.x, object_point.y, object_point.z, 1);
            cv::Mat Mc = Rt * Mw;
            float Zc = Mc.at<float>(2);
            if (Zc <= 0) {
                /* Do not project points behind the camera */
                image_point = cv::Point2f(-1, -1);
                continue;
            }

            cv::Mat XY = K * Mc;
            float x = XY.at<float>(0);
            float y = XY.at<float>(1);
            float s = XY.at<float>(2);
            x /= s;
            y /= s;

            if (this->dist_coeff.empty() || this->dist_coeff.at<float>(0) == 0) {
                image_point.x = x;
                image_point.y = y;
            } else {
                /*** Distort ***/
                /* NOTE(review): with the OpenCV layout (k1,k2,p1,p2,k3), p1
                 * should be index 2 and p2 index 3 — indices 3/4 below look
                 * off by one; confirm the intended coefficient layout.
                 * Also, v is computed from the already-distorted u (u is
                 * overwritten first) — confirm this is intended. */
                float u = (x - this->cx()) / this->fx();    /* from optical center*/
                float v = (y - this->cy()) / this->fy();    /* from optical center*/
                float r2 = u * u + v * v;
                float r4 = r2 * r2;
                float k1 = this->dist_coeff.at<float>(0);
                float k2 = this->dist_coeff.at<float>(1);
                float p1 = this->dist_coeff.at<float>(3);
                float p2 = this->dist_coeff.at<float>(4);
                u = u + u * (k1 * r2 + k2 * r4 /*+ k3 * r6 */) + (2 * p1 * u * v) + p2 * (r2 + 2 * u * u);
                v = v + v * (k1 * r2 + k2 * r4 /*+ k3 * r6 */) + (2 * p2 * u * v) + p1 * (r2 + 2 * v * v);
                image_point.x = u * this->fx() + this->cx();
                image_point.y = v * this->fy() + this->cy();
            }
        }
#else
        cv::projectPoints(object_point_list, this->rvec, this->tvec, this->K, this->dist_coeff, image_point_list);
#endif
    }

    /* Transform world points into the camera frame: Mc = [R t] * [Mw, 1]. */
    void ConvertWorld2Camera(const std::vector<cv::Point3f>& object_point_in_world_list, std::vector<cv::Point3f>& object_point_in_camera_list)
    {
        /*** Mw -> Mc ***/
        /* Mc = [R t] * [M, 1] */
        cv::Mat K = this->K;
        cv::Mat R = MakeRotationMat(Rad2Deg(this->rx()), Rad2Deg(this->ry()), Rad2Deg(this->rz()));
        cv::Mat Rt = (cv::Mat_<float>(3, 4) <<
            R.at<float>(0), R.at<float>(1), R.at<float>(2), this->tx(),
            R.at<float>(3), R.at<float>(4), R.at<float>(5), this->ty(),
            R.at<float>(6), R.at<float>(7), R.at<float>(8), this->tz());

        object_point_in_camera_list.resize(object_point_in_world_list.size());

#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int32_t i = 0; i < object_point_in_world_list.size(); i++) {
            const auto& object_point_in_world = object_point_in_world_list[i];
            auto& object_point_in_camera = object_point_in_camera_list[i];
            cv::Mat Mw = (cv::Mat_<float>(4, 1) << object_point_in_world.x, object_point_in_world.y, object_point_in_world.z, 1);
            cv::Mat Mc = Rt * Mw;
            object_point_in_camera.x = Mc.at<float>(0);
            object_point_in_camera.y = Mc.at<float>(1);
            object_point_in_camera.z = Mc.at<float>(2);
        }
    }

    /* Transform camera-frame points back to world coordinates:
     * Mw = R^-1 * (Mc - t). */
    void ConvertCamera2World(const std::vector<cv::Point3f>& object_point_in_camera_list, std::vector<cv::Point3f>& object_point_in_world_list)
    {
        /*** Mc -> Mw ***/
        /* Mc = [R t] * [Mw, 1] */
        /* -> [M, 1] = [R t]^1 * Mc  <- Unable to get the inverse of [R t] because it's 4x3 */
        /* So, Mc = R * Mw + t */
        /* -> Mw = R^1 * (Mc - t) */
        cv::Mat R = MakeRotationMat(Rad2Deg(this->rx()), Rad2Deg(this->ry()), Rad2Deg(this->rz()));
        cv::Mat R_inv;
        cv::invert(R, R_inv);
        cv::Mat t = this->tvec;


        object_point_in_world_list.resize(object_point_in_camera_list.size());

#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int32_t i = 0; i < object_point_in_camera_list.size(); i++) {
            const auto& object_point_in_camera = object_point_in_camera_list[i];
            auto& object_point_in_world = object_point_in_world_list[i];
            cv::Mat Mc = (cv::Mat_<float>(3, 1) << object_point_in_camera.x, object_point_in_camera.y, object_point_in_camera.z);
            cv::Mat Mw = R_inv * (Mc - t);
            object_point_in_world.x = Mw.at<float>(0);
            object_point_in_world.y = Mw.at<float>(1);
            object_point_in_world.z = Mw.at<float>(2);
        }
    }

    /* Back-project image points onto the world ground plane (Y = 0).
     * Points above the vanishing line (no ground intersection) and points
     * that resolve behind the camera get the sentinel value 999. */
    void ConvertImage2GroundPlane(const std::vector<cv::Point2f>& image_point_list, std::vector<cv::Point3f>& object_point_list)
    {
        /*** Image -> Mw ***/
        /*** Calculate point in ground plane (in world coordinate) ***/
        /* Main idea:*/
        /*   s * [x, y, 1] = K * [R t] * [M, 1] */
        /*   s * [x, y, 1] = K * R * M + K * t */
        /*   s * Kinv * [x, y, 1] = R * M + t */
        /*   s * Kinv * [x, y, 1] - t = R * M */
        /*   Rinv * (s * Kinv * [x, y, 1] - t) = M */
        /* calculate s */
        /*   s * Rinv * Kinv * [x, y, 1] = M + R_inv * t */
        /*   where, M = (X, Y, Z), and we assume Y = 0(ground_plane) */
        /*   so , we can solve left[1] = R_inv * t[1](camera_height) */

        if (image_point_list.size() == 0) return;

        cv::Mat K = this->K;
        cv::Mat R = MakeRotationMat(Rad2Deg(this->rx()), Rad2Deg(this->ry()), Rad2Deg(this->rz()));
        cv::Mat K_inv;
        cv::invert(K, K_inv);
        cv::Mat R_inv;
        cv::invert(R, R_inv);
        cv::Mat t = this->tvec;

        /*** Undistort image point ***/
        std::vector<cv::Point2f> image_point_undistort;
        if (this->dist_coeff.empty() || this->dist_coeff.at<float>(0) == 0) {
            image_point_undistort = image_point_list;
        } else {
            cv::undistortPoints(image_point_list, image_point_undistort, this->K, this->dist_coeff, this->K);    /* don't use K_new */
        }

        object_point_list.resize(image_point_list.size());
        for (int32_t i = 0; i < object_point_list.size(); i++) {
            const auto& image_point = image_point_list[i];
            auto& object_point = object_point_list[i];

            float x = image_point_undistort[i].x;
            float y = image_point_undistort[i].y;
            /* Rays at/above the horizon never hit the ground plane */
            if (y < EstimateVanishmentY()) {
                object_point.x = 999;
                object_point.y = 999;
                object_point.z = 999;
                continue;
            }

            cv::Mat XY = (cv::Mat_<float>(3, 1) << x, y, 1);

            /* calculate s */
            cv::Mat LEFT_WO_S = R_inv * K_inv * XY;
            cv::Mat RIGHT_WO_M = R_inv * t;    /* no need to add M because M[1] = 0 (ground plane)*/
            float s = RIGHT_WO_M.at<float>(1) / LEFT_WO_S.at<float>(1);

            /* calculate M */
            cv::Mat TEMP = R_inv * (s * K_inv * XY - t);

            object_point.x = TEMP.at<float>(0);
            object_point.y = TEMP.at<float>(1);
            object_point.z = TEMP.at<float>(2);
            if (object_point.z < 0) object_point.z = 999;
        }
    }

    /* Back-project image points into the camera frame given a depth (Zc) per
     * point. If image_point_list is empty it is filled with every pixel of
     * the image (z_list must then have width*height entries). */
    void ConvertImage2Camera(std::vector<cv::Point2f>& image_point_list, const std::vector<float>& z_list, std::vector<cv::Point3f>& object_point_list)
    {
        /*** Image -> Mc ***/
        if (image_point_list.size() == 0) {
            /* Convert for all pixels on image, when image_point_list = empty */
            if (z_list.size() != this->width * this->height) {
                printf("[ConvertImage2Camera] Invalid z_list size\n");
                return;
            }
            /* Generate the original image point mat */
            /* todo: no need to generate every time */
            for (int32_t y = 0; y < this->height; y++) {
                for (int32_t x = 0; x < this->width; x++) {
                    image_point_list.push_back(cv::Point2f(float(x), float(y)));
                }
            }
        } else {
            /* Convert for the input pixels only */
            if (z_list.size() != image_point_list.size()) {
                printf("[ConvertImage2Camera] Invalid z_list size\n");
                return;
            }
        }

        /*** Undistort image point ***/
        std::vector<cv::Point2f> image_point_undistort;
        if (this->dist_coeff.empty() || this->dist_coeff.at<float>(0) == 0) {
            image_point_undistort = image_point_list;    /* todo: redundant copy. I do this just because to make code simple */
        } else {
            cv::undistortPoints(image_point_list, image_point_undistort, this->K, this->dist_coeff, this->K);    /* don't use K_new */
        }

        object_point_list.resize(image_point_list.size());
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int32_t i = 0; i < object_point_list.size(); i++) {
            const auto& Zc = z_list[i];
            auto& object_point = object_point_list[i];

            float x = image_point_undistort[i].x;
            float y = image_point_undistort[i].y;

            /* Invert the pinhole projection at known depth Zc */
            float u = x - this->cx();
            float v = y - this->cy();
            float Xc = Zc * u / this->fx();
            float Yc = Zc * v / this->fy();
            object_point.x = Xc;
            object_point.y = Yc;
            object_point.z = Zc;
        }
    }

    /* Back-project image points with per-point depth all the way to world
     * coordinates (Image -> camera -> world). */
    void ConvertImage2World(std::vector<cv::Point2f>& image_point_list, const std::vector<float>& z_list, std::vector<cv::Point3f>& object_point_list)
    {
        /*** Image -> Mw ***/
        std::vector<cv::Point3f> object_point_in_camera_list;
        ConvertImage2Camera(image_point_list, z_list, object_point_in_camera_list);
        ConvertCamera2World(object_point_in_camera_list, object_point_list);
    }


    /* tan(theta) = delta / f */
    /* Estimate camera pitch [deg] from the vanishing point's y coordinate. */
    float EstimatePitch(float vanishment_y)
    {
        float pitch = std::atan2(this->cy() - vanishment_y, this->fy());
        return Rad2Deg(pitch);
    }

    /* Estimate camera yaw [deg] from the vanishing point's x coordinate. */
    float EstimateYaw(float vanishment_x)
    {
        float yaw = std::atan2(this->cx() - vanishment_x, this->fx());
        return Rad2Deg(yaw);
    }

    /* Estimate the vanishing line's y coordinate [px] from the current pitch. */
    int32_t EstimateVanishmentY()
    {
        float fy = this->fy();
        float cy = this->cy();
        //float fy = this->K_new.at<float>(4);
        //float cy = this->K_new.at<float>(5);
        float px_from_center = std::tan(this->rx()) * fy;
        float vanishment_y = cy - px_from_center;
        return static_cast<int32_t>(vanishment_y);
    }

    /* Estimate the vanishing point's x coordinate [px] from the current yaw. */
    int32_t EstimateVanishmentX()
    {
        float px_from_center = std::tan(this->ry()) * this->fx();
        float vanishment_x = this->cx() - px_from_center;
        return static_cast<int32_t>(vanishment_x);
    }


    /*** Other methods ***/
    /* Debug helper: print the first `size` elements of a flat float matrix. */
    template <typename T = float>
    static void PRINT_MAT_FLOAT(const cv::Mat& mat, int32_t size)
    {
        for (int32_t i = 0; i < size; i++) {
            printf("%d: %.3f\n", i, mat.at<T>(i));
        }
    }

    /* Build a 3x3 rotation matrix from angles in degrees (via Rodrigues). */
    template <typename T = float>
    static cv::Mat MakeRotationMat(T x_deg, T y_deg, T z_deg)
    {
        T x_rad = Deg2Rad(x_deg);
        T y_rad = Deg2Rad(y_deg);
        T z_rad = Deg2Rad(z_deg);
#if 0
        /* Rotation Matrix with Euler Angle */
        cv::Mat R_x = (cv::Mat_<T>(3, 3) <<
            1, 0, 0,
            0, std::cos(x_rad), -std::sin(x_rad),
            0, std::sin(x_rad), std::cos(x_rad));

        cv::Mat R_y = (cv::Mat_<T>(3, 3) <<
            std::cos(y_rad), 0, std::sin(y_rad),
            0, 1, 0,
            -std::sin(y_rad), 0, std::cos(y_rad));

        cv::Mat R_z = (cv::Mat_<T>(3, 3) <<
            std::cos(z_rad), -std::sin(z_rad), 0,
            std::sin(z_rad), std::cos(z_rad), 0,
            0, 0, 1);

        cv::Mat R = R_z * R_x * R_y;
#else
        /* Rodrigues */
        cv::Mat rvec = (cv::Mat_<T>(3, 1) << x_rad, y_rad, z_rad);
        cv::Mat R;
        cv::Rodrigues(rvec, R);
#endif
        return R;
    }

    /* Rotate a set of points in place about the origin (angles in degrees). */
    static void RotateObject(float x_deg, float y_deg, float z_deg, std::vector<cv::Point3f>& object_point_list)
    {
        cv::Mat R = MakeRotationMat(x_deg, y_deg, z_deg);
        for (auto& object_point : object_point_list) {
            cv::Mat p = (cv::Mat_<float>(3, 1) << object_point.x, object_point.y, object_point.z);
            p = R * p;
            object_point.x = p.at<float>(0);
            object_point.y = p.at<float>(1);
            object_point.z = p.at<float>(2);
        }
    }

    /* Translate a set of points in place. */
    static void MoveObject(float x, float y, float z, std::vector<cv::Point3f>& object_point_list)
    {
        for (auto& object_point : object_point_list) {
            object_point.x += x;
            object_point.y += y;
            object_point.z += z;
        }
    }
};
|
559
|
+
|
560
|
+
#endif
|