Commit 70f154a0 authored by Pete Black's avatar Pete Black

correct axes, resolve upside-down problem (opencv), remove scaling factor,...

correct axes, resolve upside-down problem (opencv), remove scaling factor, unify hmd center handling, tweak constants for new scale
parent 9e1faedb
Pipeline #192487 failed with stages
in 2 minutes and 6 seconds
......@@ -213,10 +213,10 @@ world_point_from_blobs(cv::Point2f left,
// Transform
cv::Vec4d h_world = disparity_to_depth * xydw;
// Divide by scale to get 3D vector from homogeneous
// coordinate. invert x while we are here.
cv::Point3f world_point(h_world[0] / h_world[3],
-1.0f * h_world[1] / h_world[3],
// Divide by scale to get 3D vector from homogeneous coordinate. we
// also invert x here
cv::Point3f world_point(-h_world[0] / h_world[3],
h_world[1] / h_world[3],
h_world[2] / h_world[3]);
return world_point;
......
......@@ -34,20 +34,22 @@
#include <opencv2/opencv.hpp>
#include <inttypes.h>
#define PSVR_MODEL_SCALE_FACTOR 58.0f
//#define PSVR_MODEL_SCALE_FACTOR 58.0f
#define PSVR_NUM_LEDS 7 // how many LEDs in the tracked configuration
#define PSVR_OPTICAL_SOLVE_THRESH \
5 // how many LEDs do we need to do an optical solve/correction
#define PSVR_DISAMBIG_REJECT_DIST \
1.2f // if potential match vertex is further than this distance from the
// measurement, reject the match - do not set too low
0.02f // if potential match vertex is further than this distance from
// the
// measurement, reject the match - do not set too low
#define PSVR_DISAMBIG_REJECT_ANG \
0.7f // if potential match vertex is further than this distance from the
// measurement, reject the match - do not set too low
#define PSVR_MODEL_CENTER_INDEX 2 //@todo: we should use the tag enum for this
#define PSVR_SEARCH_RADIUS \
2.5f // cutoff distance for keeping the id for a blob from one frame to
// the next
0.043f // cutoff distance for keeping the id for a blob from one frame
// to
// the next
// the magnitude of the correction relative to the previous correction must be
// below this value to contribute towards lock acquisition
#define PSVR_MAX_BAD_CORR 10
......@@ -72,12 +74,12 @@
#define PSVR_POSE_MEASUREMENT_NOISE \
100.0f // our measurements are quite noisy so we need to smooth heavily
#define PSVR_OUTLIER_THRESH 10.0f
#define PSVR_MERGE_THRESH 3.5f
#define PSVR_OUTLIER_THRESH 0.17f
#define PSVR_MERGE_THRESH 0.06f
#define PSVR_HOLD_THRESH \
5.0f // hold the previously recognised configuration unless we depart
// significantly
0.086f // hold the previously recognised configuration unless we depart
// significantly
// uncomment this to dump comprehensive optical and imu data to
// /tmp/psvr_dump.txt
......@@ -518,7 +520,7 @@ remove_outliers(std::vector<blob_point_t> *orig_points,
for (uint32_t i = 0; i < orig_points->size(); i++) {
cv::Point3f p = orig_points->at(i).p;
if (p.z > 0) {
if (p.z < 0) {
temp_points.push_back(orig_points->at(i));
}
}
......@@ -562,9 +564,9 @@ remove_outliers(std::vector<blob_point_t> *orig_points,
sqrt((error_x * error_x) + (error_y * error_y) +
(error_z * error_z));
printf("ERROR: %f %f %f %f %f %f\n", temp_points[i].p.x,
temp_points[i].p.y, temp_points[i].p.z, error_x, error_y,
error_z);
// printf("ERROR: %f %f %f %f %f %f\n", temp_points[i].p.x,
// temp_points[i].p.y, temp_points[i].p.z, error_x,
// error_y, error_z);
if (rms_error < outlier_thresh) {
pruned_points->push_back(temp_points[i]);
}
......@@ -905,11 +907,10 @@ solve_with_imu(TrackerPSVR &t,
solved->push_back(avg_data);
}
Eigen::Translation3f center_trans(
solved->at(TAG_C).position.head<3>());
Eigen::Affine3f translation(center_trans);
std::vector<match_data_t> _solved;
Eigen::Matrix4f pose =
translation.matrix() * t.corrected_imu_rotation;
solve_for_measurement(&t, solved, &_solved) *
t.corrected_imu_rotation;
t.last_pose = pose;
return pose;
}
......@@ -1076,17 +1077,17 @@ disambiguate(TrackerPSVR &t,
// reject any configuration where 'top' is below
// 'bottom
if (has_bl && has_tl && bl_pos.y() < tl_pos.y()) {
if (has_bl && has_tl && bl_pos.y() > tl_pos.y()) {
// printf("IGNORING BL > TL %f %f\n",
// bl_pos.y(),
// br_pos.y());
ignore = true;
// ignore = true;
}
if (has_br && has_tr && br_pos.y() < tr_pos.y()) {
if (has_br && has_tr && br_pos.y() > tr_pos.y()) {
// printf("IGNORING TL > TR %f %f\n",
// tl_pos.y(),
// tr_pos.y());
ignore = true;
// ignore = true;
}
// once we have a lock, bias the detected
......@@ -1099,11 +1100,11 @@ disambiguate(TrackerPSVR &t,
}
// useful for debugging
printf(
"match %d dist to last: %f dist to imu: %f "
"rmsError: %f squaredSum:%f %d\n",
i, prev_diff, imu_diff, avg_error, error_sum,
ignore);
// printf(
// "match %d dist to last: %f dist to imu: %f "
// "rmsError: %f squaredSum:%f %d\n",
// i, prev_diff, imu_diff, avg_error, error_sum,
// ignore);
}
if (avg_error <= lowest_error && !ignore) {
lowest_error = avg_error;
......@@ -1147,23 +1148,23 @@ create_model(TrackerPSVR &t)
// to minimise the incidence of incorrect led matches.
t.model_vertices[0] = {
0, Eigen::Vector4f(-2.51408f, 3.77113f, 1.54926f, 1.0f), TAG_BL,
0, Eigen::Vector4f(-0.06502f, 0.04335f, 0.01861f, 1.0f), TAG_BL,
true};
t.model_vertices[1] = {
1, Eigen::Vector4f(-2.51408f, -3.77113f, 1.54926f, 1.0f), TAG_BR,
1, Eigen::Vector4f(0.06502f, 0.04335f, 0.01861f, 1.0f), TAG_BR,
true};
t.model_vertices[2] = {2, Eigen::Vector4f(0.0f, 0.0f, 0.0f, 1.0f),
t.model_vertices[2] = {2, Eigen::Vector4f(0.0f, 0.0f, 0.04533f, 1.0f),
TAG_C, true};
t.model_vertices[3] = {
3, Eigen::Vector4f(2.51408f, 3.77113f, 1.54926f, 1.0f), TAG_TL,
3, Eigen::Vector4f(-0.06502f, -0.04335f, 0.01861f, 1.0f), TAG_TL,
true};
t.model_vertices[4] = {
4, Eigen::Vector4f(2.51408f, -3.77113f, 1.54926f, 1.0f), TAG_TR,
4, Eigen::Vector4f(0.06502f, -0.04335f, 0.01861f, 1.0f), TAG_TR,
true};
t.model_vertices[5] = {
5, Eigen::Vector4f(0.0, 4.52535f, 3.92887f, 1.0f), TAG_SL, true};
5, Eigen::Vector4f(-0.07802f, 0.0f, -0.02671f, 1.0f), TAG_SL, true};
t.model_vertices[6] = {
6, Eigen::Vector4f(0.0, -4.52535f, 3.92887f, 1.0f), TAG_SR, true};
6, Eigen::Vector4f(0.07802f, 0.0f, -0.02671f, 1.0f), TAG_SR, true};
}
......@@ -1611,7 +1612,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
float xdiff = r_blob.pt.x - l_blob.pt.x;
float ydiff = r_blob.pt.y - l_blob.pt.y;
if ((ydiff < 3.0f) && (ydiff > -3.0f) &&
(xdiff < 0 && abs(xdiff) < lowest_dist)) {
(xdiff > 0 && abs(xdiff) < lowest_dist)) {
lowest_dist = abs(xdiff);
r_index = j;
l_index = i;
......@@ -1642,12 +1643,11 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
(cv::Matx44d)t.disparity_to_depth * xydw;
// Divide by scale to get 3D vector from
// homogeneous coordinate. invert here too.
// homogeneous coordinate. we also invert x here
blob_point_t bp;
bp.p = cv::Point3f(-1.0f * h_world[0] / h_world[3],
-1.0f * h_world[1] / h_world[3],
-1.0f * (h_world[2] / h_world[3])) *
PSVR_MODEL_SCALE_FACTOR;
bp.p = cv::Point3f(-h_world[0] / h_world[3],
h_world[1] / h_world[3],
(h_world[2] / h_world[3]));
bp.lkp = t.l_blobs[i];
bp.rkp = t.r_blobs[i];
bp.btype = BLOB_TYPE_UNKNOWN;
......@@ -1705,8 +1705,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
// raw debug output for Blender algo development
for (int i = 0; i < t.merged_points.size(); i++) {
cv::Point3f unscaled =
t.merged_points.at(i).p / PSVR_MODEL_SCALE_FACTOR;
cv::Point3f unscaled = t.merged_points.at(i).p;
fprintf(t.dump_file, "P,%" PRIu64 ",%f,%f,%f\n",
......@@ -1728,13 +1727,6 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
disambiguate(t, &t.match_vertices, &predicted_pose, &solved, 0);
// Debug is wanted, draw the keypoints.
// if (t.debug.rgb[0].cols > 0) {
// if (t.match_vertices.size() > 0) {
//
// }
//}
// derive our optical rotation correction from the
// pose transform
......@@ -1874,17 +1866,10 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
Eigen::Vector4f filtered_pose;
pose_filter_predict(&filtered_pose, &t.pose_filter, dt / 1000.0f);
// get our 'working space' coords back to the original
// coordinate system.
//@todo - fix this by scaling all our parameters to work in
//'unscaled'
// space
float inv_model_scale_factor = 1.0f / PSVR_MODEL_SCALE_FACTOR;
filtered_pose *= inv_model_scale_factor;
t.optical.pos.x = -filtered_pose.x();
t.optical.pos.x = filtered_pose.x();
t.optical.pos.y = filtered_pose.y();
t.optical.pos.z = -filtered_pose.z();
t.optical.pos.z = filtered_pose.z();
t.last_frame = xf->source_sequence;
......@@ -2187,7 +2172,7 @@ t_psvr_create(struct xrt_frame_context *xfctx,
Eigen::Quaternionf align2(
Eigen::AngleAxis<float>(M_PI, Eigen::Vector3f(0.0f, 1.0f, 0.0f)));
t.axis_align_rot = align2 * align;
t.axis_align_rot = align2; // * align;
t.last_optical_model = 0;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment