Commit 70f154a0 authored by Pete Black's avatar Pete Black

correct axes, resolve upside-down problem (opencv), remove scaling factor,...

correct axes, resolve upside-down problem (opencv), remove scaling factor, unify hmd center handling, tweak constants for new scale
parent 9e1faedb
Pipeline #192487 failed with stages
in 2 minutes and 6 seconds
...@@ -213,10 +213,10 @@ world_point_from_blobs(cv::Point2f left, ...@@ -213,10 +213,10 @@ world_point_from_blobs(cv::Point2f left,
// Transform // Transform
cv::Vec4d h_world = disparity_to_depth * xydw; cv::Vec4d h_world = disparity_to_depth * xydw;
// Divide by scale to get 3D vector from homogeneous // Divide by scale to get 3D vector from homogeneous coordinate. we
// coordinate. invert x while we are here. // also invert x here
cv::Point3f world_point(h_world[0] / h_world[3], cv::Point3f world_point(-h_world[0] / h_world[3],
-1.0f * h_world[1] / h_world[3], h_world[1] / h_world[3],
h_world[2] / h_world[3]); h_world[2] / h_world[3]);
return world_point; return world_point;
......
...@@ -34,20 +34,22 @@ ...@@ -34,20 +34,22 @@
#include <opencv2/opencv.hpp> #include <opencv2/opencv.hpp>
#include <inttypes.h> #include <inttypes.h>
#define PSVR_MODEL_SCALE_FACTOR 58.0f //#define PSVR_MODEL_SCALE_FACTOR 58.0f
#define PSVR_NUM_LEDS 7 // how many LEDs in the tracked configuration #define PSVR_NUM_LEDS 7 // how many LEDs in the tracked configuration
#define PSVR_OPTICAL_SOLVE_THRESH \ #define PSVR_OPTICAL_SOLVE_THRESH \
5 // how many LEDs do we need to do an optical solve/correction 5 // how many LEDs do we need to do an optical solve/correction
#define PSVR_DISAMBIG_REJECT_DIST \ #define PSVR_DISAMBIG_REJECT_DIST \
1.2f // if potential match vertex is further than this distance from the 0.02f // if potential match vertex is further than this distance from
// measurement, reject the match - do not set too low // the
// measurement, reject the match - do not set too low
#define PSVR_DISAMBIG_REJECT_ANG \ #define PSVR_DISAMBIG_REJECT_ANG \
0.7f // if potential match vertex is further than this distance from the 0.7f // if potential match vertex is further than this distance from the
// measurement, reject the match - do not set too low // measurement, reject the match - do not set too low
#define PSVR_MODEL_CENTER_INDEX 2 //@todo: we should use the tag enum for this #define PSVR_MODEL_CENTER_INDEX 2 //@todo: we should use the tag enum for this
#define PSVR_SEARCH_RADIUS \ #define PSVR_SEARCH_RADIUS \
2.5f // cutoff distance for keeping the id for a blob from one frame to 0.043f // cutoff distance for keeping the id for a blob from one frame
// the next // to
// the next
// the magnitude of the correction relative to the previous correction must be // the magnitude of the correction relative to the previous correction must be
// below this value to contribute towards lock acquisition // below this value to contribute towards lock acquisition
#define PSVR_MAX_BAD_CORR 10 #define PSVR_MAX_BAD_CORR 10
...@@ -72,12 +74,12 @@ ...@@ -72,12 +74,12 @@
#define PSVR_POSE_MEASUREMENT_NOISE \ #define PSVR_POSE_MEASUREMENT_NOISE \
100.0f // our measurements are quite noisy so we need to smooth heavily 100.0f // our measurements are quite noisy so we need to smooth heavily
#define PSVR_OUTLIER_THRESH 10.0f #define PSVR_OUTLIER_THRESH 0.17f
#define PSVR_MERGE_THRESH 3.5f #define PSVR_MERGE_THRESH 0.06f
#define PSVR_HOLD_THRESH \ #define PSVR_HOLD_THRESH \
5.0f // hold the previously recognised configuration unless we depart 0.086f // hold the previously recognised configuration unless we depart
// signifcantly // signifcantly
// uncomment this to dump comprehensive optical and imu data to // uncomment this to dump comprehensive optical and imu data to
// /tmp/psvr_dump.txt // /tmp/psvr_dump.txt
...@@ -518,7 +520,7 @@ remove_outliers(std::vector<blob_point_t> *orig_points, ...@@ -518,7 +520,7 @@ remove_outliers(std::vector<blob_point_t> *orig_points,
for (uint32_t i = 0; i < orig_points->size(); i++) { for (uint32_t i = 0; i < orig_points->size(); i++) {
cv::Point3f p = orig_points->at(i).p; cv::Point3f p = orig_points->at(i).p;
if (p.z > 0) { if (p.z < 0) {
temp_points.push_back(orig_points->at(i)); temp_points.push_back(orig_points->at(i));
} }
} }
...@@ -562,9 +564,9 @@ remove_outliers(std::vector<blob_point_t> *orig_points, ...@@ -562,9 +564,9 @@ remove_outliers(std::vector<blob_point_t> *orig_points,
sqrt((error_x * error_x) + (error_y * error_y) + sqrt((error_x * error_x) + (error_y * error_y) +
(error_z * error_z)); (error_z * error_z));
printf("ERROR: %f %f %f %f %f %f\n", temp_points[i].p.x, // printf("ERROR: %f %f %f %f %f %f\n", temp_points[i].p.x,
temp_points[i].p.y, temp_points[i].p.z, error_x, error_y, // temp_points[i].p.y, temp_points[i].p.z, error_x,
error_z); // error_y, error_z);
if (rms_error < outlier_thresh) { if (rms_error < outlier_thresh) {
pruned_points->push_back(temp_points[i]); pruned_points->push_back(temp_points[i]);
} }
...@@ -905,11 +907,10 @@ solve_with_imu(TrackerPSVR &t, ...@@ -905,11 +907,10 @@ solve_with_imu(TrackerPSVR &t,
solved->push_back(avg_data); solved->push_back(avg_data);
} }
Eigen::Translation3f center_trans( std::vector<match_data_t> _solved;
solved->at(TAG_C).position.head<3>());
Eigen::Affine3f translation(center_trans);
Eigen::Matrix4f pose = Eigen::Matrix4f pose =
translation.matrix() * t.corrected_imu_rotation; solve_for_measurement(&t, solved, &_solved) *
t.corrected_imu_rotation;
t.last_pose = pose; t.last_pose = pose;
return pose; return pose;
} }
...@@ -1076,17 +1077,17 @@ disambiguate(TrackerPSVR &t, ...@@ -1076,17 +1077,17 @@ disambiguate(TrackerPSVR &t,
// reject any configuration where 'top' is below // reject any configuration where 'top' is below
// 'bottom // 'bottom
if (has_bl && has_tl && bl_pos.y() < tl_pos.y()) { if (has_bl && has_tl && bl_pos.y() > tl_pos.y()) {
// printf("IGNORING BL > TL %f %f\n", // printf("IGNORING BL > TL %f %f\n",
// bl_pos.y(), // bl_pos.y(),
// br_pos.y()); // br_pos.y());
ignore = true; // ignore = true;
} }
if (has_br && has_tr && br_pos.y() < tr_pos.y()) { if (has_br && has_tr && br_pos.y() > tr_pos.y()) {
// printf("IGNORING TL > TR %f %f\n", // printf("IGNORING TL > TR %f %f\n",
// tl_pos.y(), // tl_pos.y(),
// tr_pos.y()); // tr_pos.y());
ignore = true; // ignore = true;
} }
// once we have a lock, bias the detected // once we have a lock, bias the detected
...@@ -1099,11 +1100,11 @@ disambiguate(TrackerPSVR &t, ...@@ -1099,11 +1100,11 @@ disambiguate(TrackerPSVR &t,
} }
// useful for debugging // useful for debugging
printf( // printf(
"match %d dist to last: %f dist to imu: %f " // "match %d dist to last: %f dist to imu: %f "
"rmsError: %f squaredSum:%f %d\n", // "rmsError: %f squaredSum:%f %d\n",
i, prev_diff, imu_diff, avg_error, error_sum, // i, prev_diff, imu_diff, avg_error, error_sum,
ignore); // ignore);
} }
if (avg_error <= lowest_error && !ignore) { if (avg_error <= lowest_error && !ignore) {
lowest_error = avg_error; lowest_error = avg_error;
...@@ -1147,23 +1148,23 @@ create_model(TrackerPSVR &t) ...@@ -1147,23 +1148,23 @@ create_model(TrackerPSVR &t)
// to minimise the incidence of incorrect led matches. // to minimise the incidence of incorrect led matches.
t.model_vertices[0] = { t.model_vertices[0] = {
0, Eigen::Vector4f(-2.51408f, 3.77113f, 1.54926f, 1.0f), TAG_BL, 0, Eigen::Vector4f(-0.06502f, 0.04335f, 0.01861f, 1.0f), TAG_BL,
true}; true};
t.model_vertices[1] = { t.model_vertices[1] = {
1, Eigen::Vector4f(-2.51408f, -3.77113f, 1.54926f, 1.0f), TAG_BR, 1, Eigen::Vector4f(0.06502f, 0.04335f, 0.01861f, 1.0f), TAG_BR,
true}; true};
t.model_vertices[2] = {2, Eigen::Vector4f(0.0f, 0.0f, 0.0f, 1.0f), t.model_vertices[2] = {2, Eigen::Vector4f(0.0f, 0.0f, 0.04533f, 1.0f),
TAG_C, true}; TAG_C, true};
t.model_vertices[3] = { t.model_vertices[3] = {
3, Eigen::Vector4f(2.51408f, 3.77113f, 1.54926f, 1.0f), TAG_TL, 3, Eigen::Vector4f(-0.06502f, -0.04335f, 0.01861f, 1.0f), TAG_TL,
true}; true};
t.model_vertices[4] = { t.model_vertices[4] = {
4, Eigen::Vector4f(2.51408f, -3.77113f, 1.54926f, 1.0f), TAG_TR, 4, Eigen::Vector4f(0.06502f, -0.04335f, 0.01861f, 1.0f), TAG_TR,
true}; true};
t.model_vertices[5] = { t.model_vertices[5] = {
5, Eigen::Vector4f(0.0, 4.52535f, 3.92887f, 1.0f), TAG_SL, true}; 5, Eigen::Vector4f(-0.07802f, 0.0f, -0.02671f, 1.0f), TAG_SL, true};
t.model_vertices[6] = { t.model_vertices[6] = {
6, Eigen::Vector4f(0.0, -4.52535f, 3.92887f, 1.0f), TAG_SR, true}; 6, Eigen::Vector4f(0.07802f, 0.0f, -0.02671f, 1.0f), TAG_SR, true};
} }
...@@ -1611,7 +1612,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf) ...@@ -1611,7 +1612,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
float xdiff = r_blob.pt.x - l_blob.pt.x; float xdiff = r_blob.pt.x - l_blob.pt.x;
float ydiff = r_blob.pt.y - l_blob.pt.y; float ydiff = r_blob.pt.y - l_blob.pt.y;
if ((ydiff < 3.0f) && (ydiff > -3.0f) && if ((ydiff < 3.0f) && (ydiff > -3.0f) &&
(xdiff < 0 && abs(xdiff) < lowest_dist)) { (xdiff > 0 && abs(xdiff) < lowest_dist)) {
lowest_dist = abs(xdiff); lowest_dist = abs(xdiff);
r_index = j; r_index = j;
l_index = i; l_index = i;
...@@ -1642,12 +1643,11 @@ process(TrackerPSVR &t, struct xrt_frame *xf) ...@@ -1642,12 +1643,11 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
(cv::Matx44d)t.disparity_to_depth * xydw; (cv::Matx44d)t.disparity_to_depth * xydw;
// Divide by scale to get 3D vector from // Divide by scale to get 3D vector from
// homogeneous coordinate. invert here too. // homogeneous coordinate. we also invert x here
blob_point_t bp; blob_point_t bp;
bp.p = cv::Point3f(-1.0f * h_world[0] / h_world[3], bp.p = cv::Point3f(-h_world[0] / h_world[3],
-1.0f * h_world[1] / h_world[3], h_world[1] / h_world[3],
-1.0f * (h_world[2] / h_world[3])) * (h_world[2] / h_world[3]));
PSVR_MODEL_SCALE_FACTOR;
bp.lkp = t.l_blobs[i]; bp.lkp = t.l_blobs[i];
bp.rkp = t.r_blobs[i]; bp.rkp = t.r_blobs[i];
bp.btype = BLOB_TYPE_UNKNOWN; bp.btype = BLOB_TYPE_UNKNOWN;
...@@ -1705,8 +1705,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf) ...@@ -1705,8 +1705,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
// raw debug output for Blender algo development // raw debug output for Blender algo development
for (int i = 0; i < t.merged_points.size(); i++) { for (int i = 0; i < t.merged_points.size(); i++) {
cv::Point3f unscaled = cv::Point3f unscaled = t.merged_points.at(i).p;
t.merged_points.at(i).p / PSVR_MODEL_SCALE_FACTOR;
fprintf(t.dump_file, "P,%" PRIu64 ",%f,%f,%f\n", fprintf(t.dump_file, "P,%" PRIu64 ",%f,%f,%f\n",
...@@ -1728,13 +1727,6 @@ process(TrackerPSVR &t, struct xrt_frame *xf) ...@@ -1728,13 +1727,6 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
disambiguate(t, &t.match_vertices, &predicted_pose, &solved, 0); disambiguate(t, &t.match_vertices, &predicted_pose, &solved, 0);
// Debug is wanted, draw the keypoints.
// if (t.debug.rgb[0].cols > 0) {
// if (t.match_vertices.size() > 0) {
//
// }
//}
// derive our optical rotation correction from the // derive our optical rotation correction from the
// pose transform // pose transform
...@@ -1874,17 +1866,10 @@ process(TrackerPSVR &t, struct xrt_frame *xf) ...@@ -1874,17 +1866,10 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
Eigen::Vector4f filtered_pose; Eigen::Vector4f filtered_pose;
pose_filter_predict(&filtered_pose, &t.pose_filter, dt / 1000.0f); pose_filter_predict(&filtered_pose, &t.pose_filter, dt / 1000.0f);
// get our 'working space' coords back to the original
// coordinate system.
//@todo - fix this by scaling all our parameters to work in
//'unscaled'
// space
float inv_model_scale_factor = 1.0f / PSVR_MODEL_SCALE_FACTOR;
filtered_pose *= inv_model_scale_factor;
t.optical.pos.x = -filtered_pose.x(); t.optical.pos.x = filtered_pose.x();
t.optical.pos.y = filtered_pose.y(); t.optical.pos.y = filtered_pose.y();
t.optical.pos.z = -filtered_pose.z(); t.optical.pos.z = filtered_pose.z();
t.last_frame = xf->source_sequence; t.last_frame = xf->source_sequence;
...@@ -2187,7 +2172,7 @@ t_psvr_create(struct xrt_frame_context *xfctx, ...@@ -2187,7 +2172,7 @@ t_psvr_create(struct xrt_frame_context *xfctx,
Eigen::Quaternionf align2( Eigen::Quaternionf align2(
Eigen::AngleAxis<float>(M_PI, Eigen::Vector3f(0.0f, 1.0f, 0.0f))); Eigen::AngleAxis<float>(M_PI, Eigen::Vector3f(0.0f, 1.0f, 0.0f)));
t.axis_align_rot = align2 * align; t.axis_align_rot = align2; // * align;
t.last_optical_model = 0; t.last_optical_model = 0;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment