Commit fe6558eb authored by Pete Black's avatar Pete Black

code format, misc minor changes

parent 21dc82cd
Pipeline #66758 passed with stages in 1 minute and 19 seconds
// Copyright 2019, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief OpenCV calibration helpers.
* @author Pete Black <pblack@collabora.com>
*/
#ifndef T_CALIBRATION_H
#define T_CALIBRATION_H
#include <opencv2/opencv.hpp>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sys/stat.h>
#ifdef __cplusplus
extern "C" {
#endif
struct opencv_calibration_params
{
cv::Mat l_intrinsics;
cv::Mat l_distortion;
cv::Mat l_distortion_fisheye;
cv::Mat l_translation;
cv::Mat l_rotation;
cv::Mat l_projection;
cv::Mat r_intrinsics;
cv::Mat r_distortion;
cv::Mat r_distortion_fisheye;
cv::Mat r_translation;
cv::Mat r_rotation;
cv::Mat r_projection;
cv::Mat disparity_to_depth;
cv::Mat mat_image_size;
};
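/*
 * On-disk format used by the helpers below: a 3-entry uint32_t header
 * (element size in bytes, rows, cols) followed by the raw element data
 * of the matrix.
 */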
static bool
write_cv_mat(FILE* f, cv::Mat* m)
{
uint32_t header[3];
header[0] = static_cast<uint32_t>(m->elemSize());
header[1] = static_cast<uint32_t>(m->rows);
header[2] = static_cast<uint32_t>(m->cols);
	if (fwrite(static_cast<void*>(header), sizeof(uint32_t), 3, f) != 3) {
		return false;
	}
	size_t count = header[1] * header[2];
	if (fwrite(static_cast<void*>(m->data), header[0], count, f) !=
	    count) {
		return false;
	}
	return true;
}
static bool
read_cv_mat(FILE* f, cv::Mat* m)
{
uint32_t header[3];
	if (fread(static_cast<void*>(header), sizeof(uint32_t), 3, f) != 3) {
		return false;
	}
// TODO: we may have written things other than CV_32F and CV_64F.
if (header[0] == 4) {
m->create(static_cast<int>(header[1]),
static_cast<int>(header[2]), CV_32F);
} else {
m->create(static_cast<int>(header[1]),
static_cast<int>(header[2]), CV_64F);
}
	size_t count = header[1] * header[2];
	if (fread(static_cast<void*>(m->data), header[0], count, f) != count) {
		return false;
	}
	return true;
}
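/*!
 * Load the stereo calibration saved under
 * $HOME/.config/monado/<configuration_filename>.calibration and build
 * the undistortion and rectification maps for both cameras, plus the
 * disparity-to-depth matrix. Returns false if no calibration file can
 * be read.
 */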
XRT_MAYBE_UNUSED static bool
calibration_get_stereo(char* configuration_filename,
uint32_t frame_w,
uint32_t frame_h,
bool use_fisheye,
cv::Mat* l_undistort_map_x,
cv::Mat* l_undistort_map_y,
cv::Mat* l_rectify_map_x,
cv::Mat* l_rectify_map_y,
cv::Mat* r_undistort_map_x,
cv::Mat* r_undistort_map_y,
cv::Mat* r_rectify_map_x,
cv::Mat* r_rectify_map_y,
cv::Mat* disparity_to_depth)
{
struct opencv_calibration_params cp;
cv::Mat zero_distortion = cv::Mat(5, 1, CV_32F, cv::Scalar(0.0f));
	char path_string[256]; // TODO: 256 maybe not enough
	// TODO: use multiple env vars?
	char* config_path = secure_getenv("HOME");
	if (config_path == NULL) {
		// Without HOME we cannot locate the config directory.
		return false;
	}
	snprintf(path_string, sizeof(path_string),
	         "%s/.config/monado/%s.calibration", config_path,
	         configuration_filename);
	FILE* calib_file = fopen(path_string, "rb");
if (calib_file) {
// read our calibration from this file
read_cv_mat(calib_file, &cp.l_intrinsics);
read_cv_mat(calib_file, &cp.r_intrinsics);
read_cv_mat(calib_file, &cp.l_distortion);
read_cv_mat(calib_file, &cp.r_distortion);
read_cv_mat(calib_file, &cp.l_distortion_fisheye);
read_cv_mat(calib_file, &cp.r_distortion_fisheye);
read_cv_mat(calib_file, &cp.l_rotation);
read_cv_mat(calib_file, &cp.r_rotation);
read_cv_mat(calib_file, &cp.l_translation);
read_cv_mat(calib_file, &cp.r_translation);
read_cv_mat(calib_file, &cp.l_projection);
read_cv_mat(calib_file, &cp.r_projection);
read_cv_mat(calib_file,
disparity_to_depth); // provided by caller
read_cv_mat(calib_file, &cp.mat_image_size);
// TODO: scale our intrinsics if the frame size we request
// calibration for does not match what was saved
cv::Size image_size(int(cp.mat_image_size.at<float>(0, 0)),
int(cp.mat_image_size.at<float>(0, 1)));
// generate our undistortion maps - handle fisheye or
// rectilinear sources
if (use_fisheye) {
cv::fisheye::initUndistortRectifyMap(
cp.l_intrinsics, cp.l_distortion_fisheye,
cv::noArray(), cp.l_intrinsics, image_size,
CV_32FC1, *l_undistort_map_x, *l_undistort_map_y);
cv::fisheye::initUndistortRectifyMap(
cp.r_intrinsics, cp.r_distortion_fisheye,
cv::noArray(), cp.r_intrinsics, image_size,
CV_32FC1, *r_undistort_map_x, *r_undistort_map_y);
} else {
cv::initUndistortRectifyMap(
cp.l_intrinsics, cp.l_distortion, cv::noArray(),
cp.l_intrinsics, image_size, CV_32FC1,
*l_undistort_map_x, *l_undistort_map_y);
cv::initUndistortRectifyMap(
cp.r_intrinsics, cp.r_distortion, cv::noArray(),
cp.r_intrinsics, image_size, CV_32FC1,
*r_undistort_map_x, *r_undistort_map_y);
}
// generate our rectification maps
cv::initUndistortRectifyMap(cp.l_intrinsics, zero_distortion,
cp.l_rotation, cp.l_projection,
image_size, CV_32FC1,
*l_rectify_map_x, *l_rectify_map_y);
cv::initUndistortRectifyMap(cp.r_intrinsics, zero_distortion,
cp.r_rotation, cp.r_projection,
image_size, CV_32FC1,
*r_rectify_map_x, *r_rectify_map_y);
		fclose(calib_file);
		return true;
	}
return false;
}
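/*
 * Usage sketch (illustrative only, not part of the API): load the maps
 * for a hypothetical 640x480 rectilinear stereo pair and undistort one
 * frame. The calibration name "PS4_EYE" matches the tracker below; the
 * map variables are caller-owned cv::Mats.
 *
 *   cv::Mat l_ux, l_uy, l_rx, l_ry;
 *   cv::Mat r_ux, r_uy, r_rx, r_ry;
 *   cv::Mat d2d;
 *   if (calibration_get_stereo((char*)"PS4_EYE", 640, 480, false,
 *                              &l_ux, &l_uy, &l_rx, &l_ry,
 *                              &r_ux, &r_uy, &r_rx, &r_ry, &d2d)) {
 *           cv::remap(l_in, l_out, l_ux, l_uy, cv::INTER_LINEAR);
 *   }
 */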
// TODO - move this as it is a generic helper
static int
mkpath(char* path)
{
char tmp[PATH_MAX]; // TODO: PATH_MAX probably not strictly correct
char* p = nullptr;
size_t len;
snprintf(tmp, sizeof(tmp), "%s", path);
	len = strlen(tmp);
	if (len == 0) {
		return -1;
	}
	if (tmp[len - 1] == '/') {
		tmp[len - 1] = 0;
	}
for (p = tmp + 1; *p; p++) {
if (*p == '/') {
*p = 0;
if (mkdir(tmp, S_IRWXU) < 0 && errno != EEXIST)
return -1;
*p = '/';
}
}
if (mkdir(tmp, S_IRWXU) < 0 && errno != EEXIST)
return -1;
return 0;
}
// TODO: templatise these 2?
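// Euclidean distance between two 3D points (Point3f and Vec3f variants).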
XRT_MAYBE_UNUSED static float
cv_dist3d_point(cv::Point3f& p, cv::Point3f& q)
{
cv::Point3f d = p - q;
return cv::sqrt(d.x * d.x + d.y * d.y + d.z * d.z);
}
XRT_MAYBE_UNUSED static float
cv_dist3d_vec(cv::Vec3f& p, cv::Vec3f& q)
{
cv::Point3f d = p - q;
return cv::sqrt(d.x * d.x + d.y * d.y + d.z * d.z);
}
#ifdef __cplusplus
}
#endif
#endif // T_CALIBRATION_H
@@ -49,166 +49,168 @@ public:
} fusion;
cv::Mat l_frame_grey;
cv::Mat r_frame_grey;
cv::Mat l_undistort_map_x;
cv::Mat l_undistort_map_y;
cv::Mat l_rectify_map_x;
cv::Mat l_rectify_map_y;
cv::Mat r_undistort_map_x;
cv::Mat r_undistort_map_y;
cv::Mat r_rectify_map_x;
cv::Mat r_rectify_map_y;
cv::Mat disparity_to_depth;
bool calibrated;
std::vector<cv::KeyPoint> l_keypoints;
std::vector<cv::KeyPoint> r_keypoints;
cv::Ptr<cv::SimpleBlobDetector> sbd;
xrt_vec3 tracked_object_position;
};
static void
process(TrackerPSMV &t, struct xrt_frame *xf)
{
	// Only IMU data
	if (xf == NULL) {
		return;
	}
	if (!t.calibrated) {
		if (calibration_get_stereo(
		        "PS4_EYE", xf->width, xf->height, false,
		        &t.l_undistort_map_x, &t.l_undistort_map_y,
		        &t.l_rectify_map_x, &t.l_rectify_map_y,
		        &t.r_undistort_map_x, &t.r_undistort_map_y,
		        &t.r_rectify_map_x, &t.r_rectify_map_y,
		        &t.disparity_to_depth)) {
			printf("loaded calibration for camera!\n");
			t.calibrated = true;
		} else {
			// No calibration yet, so we cannot undistort;
			// drop this frame.
			xrt_frame_reference(&xf, NULL);
			return;
		}
	}
t.l_keypoints.clear();
t.r_keypoints.clear();
// TODO: we assume L8 format here, this may be a bad assumption
	cv::Mat l_grey(xf->height, xf->width / 2, CV_8UC1, xf->data,
	               xf->stride); // step: bytes per row of the full frame
	cv::Mat r_grey(xf->height, xf->width / 2, CV_8UC1,
	               xf->data + xf->width / 2, xf->stride);
cv::Mat l_frame_undist;
cv::Mat r_frame_undist;
// undistort the whole image
cv::remap(l_grey, l_frame_undist, t.l_undistort_map_x,
t.l_undistort_map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT,
cv::Scalar(0, 0, 0));
cv::remap(r_grey, r_frame_undist, t.r_undistort_map_x,
t.r_undistort_map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT,
cv::Scalar(0, 0, 0));
// rectify the whole image
cv::remap(l_frame_undist, l_grey, t.l_rectify_map_x, t.l_rectify_map_y,
cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
cv::remap(r_frame_undist, r_grey, t.r_rectify_map_x, t.r_rectify_map_y,
cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
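	// binarize: pixels brighter than 32 become 255
	// (threshold type 0 == cv::THRESH_BINARY)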
cv::threshold(l_grey, l_grey, 32.0, 255.0, 0);
cv::threshold(r_grey, r_grey, 32.0, 255.0, 0);
// tracker_measurement_t m = {};
// do blob detection with our masks
// TODO: re-enable masks
t.sbd->detect(l_grey, t.l_keypoints); //,internal->l_mask_gray);
t.sbd->detect(r_grey, t.r_keypoints); //,internal->r_mask_gray);
// do some basic matching to come up with likely disparity-pairs
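	// After rectification, corresponding features should sit on
	// (almost) the same scanline, so we only match within +/-3 px in y.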
std::vector<cv::KeyPoint> l_blobs, r_blobs;
for (uint32_t i = 0; i < t.l_keypoints.size(); i++) {
cv::KeyPoint l_blob = t.l_keypoints[i];
		int l_index = -1;
		int r_index = -1;
		// the running minimum must persist across the inner loop
		float lowest_dist = 128;
		for (uint32_t j = 0; j < t.r_keypoints.size(); j++) {
			cv::KeyPoint r_blob = t.r_keypoints[j];
// find closest point on same-ish scanline
if ((l_blob.pt.y < r_blob.pt.y + 3) &&
(l_blob.pt.y > r_blob.pt.y - 3) &&
((r_blob.pt.x - l_blob.pt.x) < lowest_dist)) {
lowest_dist = r_blob.pt.x - l_blob.pt.x;
r_index = j;
l_index = i;
}
}
if (l_index > -1 && r_index > -1) {
l_blobs.push_back(t.l_keypoints.at(l_index));
r_blobs.push_back(t.r_keypoints.at(r_index));
}
}
// convert our 2d point + disparities into 3d points.
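	// With the 4x4 disparity-to-depth matrix Q (the convention used by
	// cv::stereoRectify): [X Y Z W]^T = Q * [x y d 1]^T, and the 3D
	// point is (X/W, Y/W, Z/W).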
std::vector<cv::Point3f> world_points;
if (l_blobs.size() > 0) {
for (uint32_t i = 0; i < l_blobs.size(); i++) {
float disp = r_blobs[i].pt.x - l_blobs[i].pt.x;
cv::Vec4d xydw(l_blobs[i].pt.x, l_blobs[i].pt.y, disp,
1.0f);
// Transform
cv::Vec4d h_world =
(cv::Matx44d)t.disparity_to_depth * xydw;
// Divide by scale to get 3D vector from homogeneous
// coordinate. invert x while we are here.
world_points.push_back(cv::Point3f(
-h_world[0] / h_world[3], h_world[1] / h_world[3],
h_world[2] / h_world[3]));
}
}
int tracked_index = -1;
float lowest_dist = 65535.0f;
cv::Point3f last_point(t.tracked_object_position.x,
t.tracked_object_position.y,
t.tracked_object_position.z);
for (uint32_t i = 0; i < world_points.size(); i++) {
cv::Point3f world_point = world_points[i];
float dist = cv_dist3d_point(world_point, last_point);
if (dist < lowest_dist) {
tracked_index = i;
lowest_dist = dist;
}
}
if (tracked_index != -1) {
cv::Point3f world_point = world_points[tracked_index];
/*
//apply our room setup transform
Eigen::Vector3f p = Eigen::Map<Eigen::Vector3f>(&world_point.x);
Eigen::Vector4f pt;
pt.x() = p.x();
pt.y() = p.y();
pt.z() = p.z();
pt.w() = 1.0f;
//this is a glm mat4 written out 'flat'
Eigen::Matrix4f mat =
Eigen::Map<Eigen::Matrix<float,4,4>>(internal->rs.origin_transform.v);
pt = mat * pt;
m.pose.position.x = pt.x();
m.pose.position.y = pt.y();
m.pose.position.z = pt.z();
*/
// update internal state
t.tracked_object_position.x = world_point.x;
t.tracked_object_position.y = world_point.y;
t.tracked_object_position.z = world_point.z;
}
xrt_frame_reference(&xf, NULL);
}
@@ -262,7 +264,8 @@ get_pose(TrackerPSMV &t,
return;
}
// out_relation->pose.position = t.fusion.pos;
out_relation->pose.position = t.tracked_object_position;
out_relation->pose.orientation = t.fusion.rot;
//! @todo assuming that orientation is actually currently tracked.
@@ -455,20 +458,22 @@ t_psmv_create(struct xrt_frame_context *xfctx,
break;
}
cv::SimpleBlobDetector::Params blob_params;
blob_params.filterByArea = false;
blob_params.filterByConvexity = false;
blob_params.filterByInertia = false;
blob_params.filterByColor = true;
blob_params.blobColor =
255; // 0 or 255 - color comes from binarized image?
blob_params.minArea = 1;
blob_params.maxArea = 1000;
blob_params.maxThreshold =
51; // using a wide threshold span slows things down bigtime
blob_params.minThreshold = 50;
blob_params.thresholdStep = 1;
blob_params.minDistBetweenBlobs = 5;
blob_params.minRepeatability = 1; // need this to avoid error?
t.sbd = cv::SimpleBlobDetector::create(blob_params);
xrt_frame_context_add(xfctx, &t.node);
*out_sink = &t.sink;