Commit 9e1faedb authored by Pete Black

latest updates to tracking - enhance filtering, rework lock on and drift-towards-target-rotation, unify psvr and psmv directions
parent 156d6fe1
Pipeline #190721 failed with stages in 1 minute and 30 seconds
@@ -127,7 +127,7 @@ do_view(TrackerPSMV &t, View &view, cv::Mat &grey, cv::Mat &rgb)
view.frame_undist_rectified, // dst
view.undistort_rectify_map_x, // map1
view.undistort_rectify_map_y, // map2
cv::INTER_LINEAR, // interpolation
cv::INTER_NEAREST, // interpolation
cv::BORDER_CONSTANT, // borderMode
cv::Scalar(0, 0, 0)); // borderValue
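The hunk above switches the PSMV view remap from bilinear to nearest-neighbour interpolation. A minimal sketch of that undistort-rectify step, wrapped in a hypothetical helper (the map and frame names come from the surrounding code; the helper itself is illustrative):

```cpp
#include <opencv2/imgproc.hpp>

// Sketch only: undistort_rectify_map_x/y stand in for maps produced
// earlier by cv::initUndistortRectifyMap().
static void
undistort_rectify_view(const cv::Mat &grey,
                       const cv::Mat &undistort_rectify_map_x,
                       const cv::Mat &undistort_rectify_map_y,
                       cv::Mat &frame_undist_rectified)
{
	cv::remap(grey,                    // src
	          frame_undist_rectified,  // dst
	          undistort_rectify_map_x, // map1
	          undistort_rectify_map_y, // map2
	          cv::INTER_NEAREST,       // cheaper than INTER_LINEAR per frame
	          cv::BORDER_CONSTANT,     // borderMode
	          cv::Scalar(0, 0, 0));    // black border for unmapped pixels
}
```

Nearest-neighbour sampling is usually good enough for blob detection on a thresholded LED image and avoids the per-pixel interpolation cost.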
@@ -215,8 +215,8 @@ world_point_from_blobs(cv::Point2f left,
// Divide by scale to get 3D vector from homogeneous
// coordinate. invert x while we are here.
cv::Point3f world_point(-h_world[0] / h_world[3],
h_world[1] / h_world[3],
cv::Point3f world_point(h_world[0] / h_world[3],
-1.0f * h_world[1] / h_world[3],
h_world[2] / h_world[3]);
return world_point;
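For reference, a hedged sketch of the homogeneous-to-Euclidean conversion this hunk adjusts: triangulation yields a 4-vector, dividing by the w component gives the 3D point, and the sign flip moved from x to y here is part of unifying the PSMV output with the PSVR convention. The helper name and the cv::Vec4f container are illustrative, not the tracker's own types:

```cpp
#include <opencv2/core.hpp>

// Sketch: convert one homogeneous column (as produced by e.g.
// cv::triangulatePoints) into a 3D point, flipping y to match the
// tracker's world convention.
static cv::Point3f
world_point_from_homogeneous(const cv::Vec4f &h_world)
{
	return cv::Point3f(h_world[0] / h_world[3],         // x unchanged
	                   -1.0f * h_world[1] / h_world[3], // y flipped
	                   h_world[2] / h_world[3]);        // z unchanged
}
```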
@@ -50,22 +50,29 @@
// the next
// the magnitude of the correction relative to the previous correction must be
// below this value to contribute towards lock acquisition
#define PSVR_MAX_BAD_CORR 200
#define PSVR_BAD_CORRECTION_THRESH 0.9f
#define PSVR_CORRECTION_THRESH 0.1f
#define PSVR_MAX_CORRECTION \
#define PSVR_MAX_BAD_CORR 10
#define PSVR_BAD_CORRECTION_THRESH 0.1f
#define PSVR_CORRECTION_THRESH 0.05f
#define PSVR_FAST_CORRECTION \
0.05f // we will 'drift' our imu-solved rotation towards our
// optically
// solved correction to avoid jumps
#define PSVR_SLOW_CORRECTION \
0.005f // we will 'drift' our imu-solved rotation towards our
// optically
// solved correction to avoid jumps
// kalman filter coefficients
#define PSVR_BLOB_PROCESS_NOISE 0.1f // R
#define PSVR_BLOB_MEASUREMENT_NOISE 1.0f // Q
#define PSVR_POSE_PROCESS_NOISE 0.005f // R
#define PSVR_POSE_MEASUREMENT_NOISE 0.1f // Q
#define PSVR_POSE_PROCESS_NOISE 0.5f // R
#define PSVR_POSE_MEASUREMENT_NOISE \
100.0f // our measurements are quite noisy so we need to smooth heavily
#define PSVR_OUTLIER_THRESH 15.0f
#define PSVR_OUTLIER_THRESH 10.0f
#define PSVR_MERGE_THRESH 3.5f
#define PSVR_HOLD_THRESH \
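The pose filter constants above are plain process/measurement noise scales; note the inline // R and // Q tags are the reverse of the usual naming, where Q is process noise and R is measurement noise. A minimal sketch of where such constants would land, assuming a cv::KalmanFilter-style filter behind the pose filter (the tracker's actual filter state layout is not shown here):

```cpp
#include <opencv2/video/tracking.hpp>

// Sketch only: a constant-position filter over a 3D position, showing
// how process/measurement noise scales like the defines above apply.
static void
init_pose_filter(cv::KalmanFilter &kf,
                 float process_noise,
                 float measurement_noise)
{
	kf.init(3, 3, 0, CV_32F); // 3 state vars, 3 measured vars
	cv::setIdentity(kf.transitionMatrix);
	cv::setIdentity(kf.measurementMatrix);
	cv::setIdentity(kf.processNoiseCov, cv::Scalar::all(process_noise));
	cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(measurement_noise));
	cv::setIdentity(kf.errorCovPost, cv::Scalar::all(1.0f));
}
```

Raising the measurement noise relative to the process noise (0.5 vs 100 in the new values) makes the filter trust its prediction more, i.e. heavier smoothing, which matches the comment that the optical measurements are quite noisy.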
@@ -255,6 +262,7 @@ public:
bool done_correction; // set after a 'lock' is acquired
float max_correction;
// if we have made a lot of optical measurements that *should*
// be converging, but have not - we should reset
@@ -554,9 +562,9 @@ remove_outliers(std::vector<blob_point_t> *orig_points,
sqrt((error_x * error_x) + (error_y * error_y) +
(error_z * error_z));
// printf("ERROR: %f %f %f %f %f %f\n", temp_points[i].p.x,
// temp_points[i].p.y, temp_points[i].p.z, error_x,
// error_y, error_z);
printf("ERROR: %f %f %f %f %f %f\n", temp_points[i].p.x,
temp_points[i].p.y, temp_points[i].p.z, error_x, error_y,
error_z);
if (rms_error < outlier_thresh) {
pruned_points->push_back(temp_points[i]);
}
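The outlier pass above keeps points whose error magnitude stays under PSVR_OUTLIER_THRESH, now lowered from 15 to 10. A hedged sketch of the same idea, pruning points whose distance from the set's centroid exceeds a threshold (the real code's reference point and blob_point_t layout differ; cv::Point3f keeps the example self-contained):

```cpp
#include <cmath>
#include <vector>
#include <opencv2/core.hpp>

// Sketch: drop points further than outlier_thresh from the centroid.
static std::vector<cv::Point3f>
prune_outliers(const std::vector<cv::Point3f> &points, float outlier_thresh)
{
	cv::Point3f mean(0, 0, 0);
	for (const auto &p : points) {
		mean += p;
	}
	if (!points.empty()) {
		mean *= 1.0f / (float)points.size();
	}

	std::vector<cv::Point3f> pruned;
	for (const auto &p : points) {
		cv::Point3f e = p - mean;
		float dist = std::sqrt(e.x * e.x + e.y * e.y + e.z * e.z);
		if (dist < outlier_thresh) {
			pruned.push_back(p); // keep inliers only
		}
	}
	return pruned;
}
```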
@@ -906,7 +914,7 @@ solve_with_imu(TrackerPSVR &t,
return pose;
}
printf("LOST TRACKING - RETURNING LAST POSE\n");
t.max_correction = PSVR_SLOW_CORRECTION;
return t.last_pose;
}
@@ -957,8 +965,9 @@ disambiguate(TrackerPSVR &t,
Eigen::Matrix4f res =
solve_for_measurement(&t, measured_points, solved);
float diff = last_diff(t, solved, &t.last_vertices);
if (diff < PSVR_HOLD_THRESH) {
// printf("diff from last: %f\n", diff);
return res;
}
}
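This hunk gates an early return on PSVR_HOLD_THRESH: if the freshly measured vertices land close to the last solved set, the previous LED assignment is kept rather than re-disambiguated, avoiding label flicker between frames. A minimal sketch of that kind of hold check, with illustrative names and Eigen vectors standing in for the tracker's vertex type:

```cpp
#include <vector>
#include <Eigen/Core>

// Sketch: average distance between corresponding vertices of the
// current and previous model fit; below the hold threshold the caller
// keeps the previous assignment.
static bool
should_hold_last_solution(const std::vector<Eigen::Vector3f> &current,
                          const std::vector<Eigen::Vector3f> &last,
                          float hold_thresh)
{
	if (current.size() != last.size() || current.empty()) {
		return false;
	}
	float sum = 0.0f;
	for (size_t i = 0; i < current.size(); i++) {
		sum += (current[i] - last[i]).norm();
	}
	return (sum / (float)current.size()) < hold_thresh;
}
```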
@@ -989,7 +998,7 @@ disambiguate(TrackerPSVR &t,
//@todo: use tags instead of numeric vertex indices
for (uint32_t j = 1; j < measured_points->size(); j++) {
for (uint32_t j = 0; j < measured_points->size(); j++) {
if (measured_points->at(j).src_blob.btype ==
BLOB_TYPE_FRONT &&
@@ -1090,11 +1099,11 @@ disambiguate(TrackerPSVR &t,
}
// useful for debugging
/*printf(
printf(
"match %d dist to last: %f dist to imu: %f "
"rmsError: %f squaredSum:%f %d\n",
i, prev_diff, imu_diff, avgError, errorSum,
ignore);*/
i, prev_diff, imu_diff, avg_error, error_sum,
ignore);
}
if (avg_error <= lowest_error && !ignore) {
lowest_error = avg_error;
@@ -1236,7 +1245,7 @@ do_view(TrackerPSVR &t, View &view, cv::Mat &grey, cv::Mat &rgb)
cv::INTER_NEAREST, // interpolation - LINEAR seems
// very slow on my setup
cv::BORDER_CONSTANT, // borderMode
cv::Scalar(0, 255, 0)); // borderValue
cv::Scalar(0, 0, 0)); // borderValue
cv::threshold(view.frame_undist_rectified, // src
view.frame_undist_rectified, // dst
@@ -1601,7 +1610,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
// find closest point on same-ish scanline
float xdiff = r_blob.pt.x - l_blob.pt.x;
float ydiff = r_blob.pt.y - l_blob.pt.y;
if ((ydiff < 1.0f) && (ydiff > -1.0f) &&
if ((ydiff < 3.0f) && (ydiff > -3.0f) &&
(xdiff < 0 && abs(xdiff) < lowest_dist)) {
lowest_dist = abs(xdiff);
r_index = j;
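The epipolar tolerance for pairing left/right blobs is widened here from ±1 to ±3 pixels. A hedged sketch of the matching loop this sits in: for each left blob, pick the nearest right blob on roughly the same scanline with the expected negative disparity sign (in a rectified pair the right-image point sits to the left of its left-image partner). The helper name is illustrative; cv::KeyPoint matches the fields used above:

```cpp
#include <cmath>
#include <vector>
#include <opencv2/core.hpp>

// Sketch: brute-force match of one left keypoint against the right
// keypoints of a rectified pair. Returns -1 when nothing matches.
static int
match_blob_on_scanline(const cv::KeyPoint &l_blob,
                       const std::vector<cv::KeyPoint> &r_blobs,
                       float y_tolerance)
{
	int r_index = -1;
	float lowest_dist = 65535.0f;
	for (size_t j = 0; j < r_blobs.size(); j++) {
		float xdiff = r_blobs[j].pt.x - l_blob.pt.x;
		float ydiff = r_blobs[j].pt.y - l_blob.pt.y;
		// same-ish scanline, negative disparity, closest candidate
		if (std::fabs(ydiff) < y_tolerance && xdiff < 0 &&
		    std::fabs(xdiff) < lowest_dist) {
			lowest_dist = std::fabs(xdiff);
			r_index = (int)j;
		}
	}
	return r_index;
}
```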
@@ -1635,8 +1644,8 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
// Divide by scale to get 3D vector from
// homogeneous coordinate. invert here too.
blob_point_t bp;
bp.p = cv::Point3f(1.0f * h_world[0] / h_world[3],
1.0f * h_world[1] / h_world[3],
bp.p = cv::Point3f(-1.0f * h_world[0] / h_world[3],
-1.0f * h_world[1] / h_world[3],
-1.0f * (h_world[2] / h_world[3])) *
PSVR_MODEL_SCALE_FACTOR;
bp.lkp = t.l_blobs[i];
@@ -1671,9 +1680,9 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
// uncomment to debug 'overpruning' or other issues
// that may be related to calibration scale
// printf("world points: %d pruned points: %d merged points %d\n",
// t.world_points.size(), t.pruned_points.size(),
// t.merged_points.size());
printf("world points: %d pruned points: %d merged points %d\n",
t.world_points.size(), t.pruned_points.size(),
t.merged_points.size());
// put our blob positions in a slightly more
@@ -1786,6 +1795,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
t.target_optical_rotation_correction = correction;
t.done_correction = true;
printf("LOCKED\n");
t.max_correction = PSVR_FAST_CORRECTION;
t.bad_correction_count = 0;
}
if (t.avg_optical_correction > PSVR_BAD_CORRECTION_THRESH) {
@@ -1793,9 +1803,10 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
}
if (t.bad_correction_count > PSVR_MAX_BAD_CORR) {
t.max_correction = PSVR_SLOW_CORRECTION;
t.target_optical_rotation_correction =
t.target_optical_rotation_correction.slerp(
PSVR_MAX_CORRECTION, correction);
t.max_correction, correction);
t.bad_correction_count = 0;
printf("TOO MANY BAD CORRECTIONS. DRIFTED?\n");
}
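Together with the lock branch above, this is the reworked lock-on behaviour named in the commit message: once the running optical correction converges, the applied correction drifts toward the target at PSVR_FAST_CORRECTION; if too many bad corrections accumulate afterwards, the target is re-seeded and the rate drops to PSVR_SLOW_CORRECTION. A minimal sketch of the per-frame drift step, with an illustrative helper name:

```cpp
#include <Eigen/Geometry>

// Sketch: drift the applied correction a small step toward the target
// each frame instead of snapping, to avoid visible jumps. 'rate' would
// be PSVR_FAST_CORRECTION after a lock, or PSVR_SLOW_CORRECTION after
// too many bad corrections.
static Eigen::Quaternionf
drift_correction(const Eigen::Quaternionf &applied,
                 const Eigen::Quaternionf &target,
                 float rate)
{
	return applied.slerp(rate, target);
}
```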
@@ -1814,7 +1825,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
// immediately to smooth things out.
t.optical_rotation_correction = t.optical_rotation_correction.slerp(
PSVR_MAX_CORRECTION, t.target_optical_rotation_correction);
t.max_correction, t.target_optical_rotation_correction);
#ifdef PSVR_DUMP_FOR_OFFLINE_ANALYSIS
fprintf(t.dump_file, "\n");
@@ -1847,7 +1858,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
}
if (t.last_vertices.size() > 0) {
filter_update(&t.last_vertices, t.track_filters, dt);
filter_update(&t.last_vertices, t.track_filters, dt / 1000.0f);
}
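The filter updates now receive dt divided by 1000, apparently converting a millisecond delta into seconds. The unit matters because dt scales the motion model inside the filter; a hedged sketch with a simple constant-velocity transition matrix (not necessarily the tracker's own model), where passing milliseconds would overstate the predicted motion by a factor of 1000:

```cpp
#include <opencv2/video/tracking.hpp>

// Sketch: kf is assumed initialized with a 2-element [position,
// velocity] state; the transition matrix depends directly on dt.
static void
filter_predict_1d(cv::KalmanFilter &kf, float dt_seconds)
{
	kf.transitionMatrix = (cv::Mat_<float>(2, 2) << 1.0f, dt_seconds, //
	                       0.0f, 1.0f);
	kf.predict();
}
```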
@@ -1861,7 +1872,7 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
// correction at this time. We can update our
// position now.
Eigen::Vector4f filtered_pose;
pose_filter_predict(&filtered_pose, &t.pose_filter, dt);
pose_filter_predict(&filtered_pose, &t.pose_filter, dt / 1000.0f);
// get our 'working space' coords back to the original
// coordinate system.
@@ -1872,8 +1883,8 @@ process(TrackerPSVR &t, struct xrt_frame *xf)
filtered_pose *= inv_model_scale_factor;
t.optical.pos.x = -filtered_pose.x();
t.optical.pos.y = -filtered_pose.y();
t.optical.pos.z = filtered_pose.z();
t.optical.pos.y = filtered_pose.y();
t.optical.pos.z = -filtered_pose.z();
t.last_frame = xf->source_sequence;
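The position output now negates x and z rather than x and y; together with the triangulation change earlier in this file, this is the "unify psvr and psmv directions" part of the commit, mapping the solver's working frame onto the tracker's output convention by flipping axes. A small sketch of the final hand-off, with a stand-in struct for the xrt position type (the real code writes into t.optical.pos):

```cpp
#include <Eigen/Core>

// Assumed minimal stand-in for the xrt position struct.
struct vec3_sketch
{
	float x, y, z;
};

// Sketch: undo the model scale used while solving, then flip x and z
// so the optical position lands in the tracker's output convention.
static vec3_sketch
optical_position_from_filtered(Eigen::Vector4f filtered_pose,
                               float inv_model_scale_factor)
{
	filtered_pose *= inv_model_scale_factor;
	return vec3_sketch{-filtered_pose.x(), filtered_pose.y(),
	                   -filtered_pose.z()};
}
```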
@@ -2168,15 +2179,15 @@ t_psvr_create(struct xrt_frame_context *xfctx,
t.corrected_imu_rotation = Eigen::Matrix4f().Identity();
t.avg_optical_correction = 10.0f; // initialise to a high value, so we
// can converge to a low one.
t.max_correction = PSVR_FAST_CORRECTION;
t.bad_correction_count = 0;
Eigen::Quaternionf align(Eigen::AngleAxis<float>(
-M_PI / 2, Eigen::Vector3f(0.0f, 0.0f, 1.0f)));
Eigen::Quaternionf align2(
Eigen::AngleAxis<float>(M_PI, Eigen::Vector3f(0.0f, 0.0f, 1.0f)));
Eigen::AngleAxis<float>(M_PI, Eigen::Vector3f(0.0f, 1.0f, 0.0f)));
t.axis_align_rot = align;
t.axis_align_rot = align2 * align;
t.last_optical_model = 0;
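The alignment rotation is now built from two axis-angle rotations composed into one quaternion: -π/2 about z, then π about the (new) y axis. A brief sketch of that construction; with Eigen's convention the right-hand factor is applied to a vector first:

```cpp
#include <cmath>
#include <Eigen/Geometry>

// Sketch: compose the two axis-angle rotations used above into one
// alignment quaternion.
static Eigen::Quaternionf
make_axis_align_rot()
{
	Eigen::Quaternionf align(Eigen::AngleAxis<float>(
	    -M_PI / 2, Eigen::Vector3f(0.0f, 0.0f, 1.0f)));
	Eigen::Quaternionf align2(Eigen::AngleAxis<float>(
	    M_PI, Eigen::Vector3f(0.0f, 1.0f, 0.0f)));
	return align2 * align; // align applies first, then align2
}
```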