Commit c8b0db7a authored by Pete Black's avatar Pete Black

add 2D blob tracks to debug output

parent dd78aab8
Pipeline #213480 failed with stages
in 1 minute and 27 seconds
......@@ -59,7 +59,7 @@
#define EXPMONO_Y_MIN 70
#define EXPMONO_Y_MAX 255
#define EXPMONO_BLOB_MERGE_THRESH 16
#define EXPMONO_BLOB_MERGE_THRESH 20
#define EXPMONO_BLOB_SIZE_THRESH 32
#define EXPMONO_BBOX_RATIO_THRESH 20
......@@ -223,7 +223,7 @@ struct exp_mono_hmd
FILE *raw_file;
std::vector<pix_span_t> cur_spans;
std::vector<blob_extent_t> last_blobs;
std::vector<object_blob_t> last_blobs;
std::vector<blob_extent_t> cur_blobs;
pthread_mutex_t process_lock;
......@@ -233,6 +233,8 @@ struct exp_mono_hmd
int calibration_count;
std::vector<std::vector<cv::Vec3f>> calib_object_points;
std::vector<std::vector<cv::Vec2f>> calib_image_points;
std::vector<cv::Point2f> blob_tracks[EXPMONO_MAX_BLOB_COUNT];
};
......@@ -389,7 +391,11 @@ magnitudeVec2(Eigen::Vector2f v)
return (sqrtf((v.x() * v.x()) + (v.y() * v.y())));
}
/*!
 * Compute the Euclidean length (magnitude) of a 2D vector.
 *
 * Overload for cv::Point2f, mirroring the Eigen::Vector2f overload above.
 * Uses hypotf() rather than sqrtf(x*x + y*y) so the intermediate squares
 * cannot overflow to +inf (or underflow to 0) for extreme components.
 *
 * @param v  2D point/vector whose length is wanted.
 * @return   sqrt(v.x^2 + v.y^2) as a float.
 */
float
magnitudeVec2(cv::Point2f v)
{
	return hypotf(v.x, v.y);
}
bool
optical_pose(struct exp_mono_hmd *emh,
......@@ -816,8 +822,6 @@ create_spans(struct exp_mono_hmd *emh, uint32_t width, uint32_t height)
temp_span.ex = j;
}
} else {
if (started_span) {
temp_span.l =
......@@ -878,7 +882,7 @@ spans_to_blobs(struct exp_mono_hmd *emh)
temp_blob.bry = sp->y;
temp_blob.train_id = sp->train_id;
emh->cur_blobs.push_back(temp_blob);
sp->blob_id = emh->cur_blobs.size();
sp->blob_id = -1;
} else {
......@@ -1011,6 +1015,13 @@ analyse_blobs(struct exp_mono_hmd *emh, bool train)
}
}
/*!
 * Stub — presumably intended to reproject a tracked pose back into 2D
 * blob positions (e.g. for the debug overlay); not yet implemented.
 * TODO(review): confirm intended semantics before filling in.
 *
 * @param emh   Driver state (currently unused).
 * @param pose  4x4 pose matrix to reproject (currently unused).
 * @param blobs Output list of object blobs (currently unused).
 */
void
reproject_pose(struct exp_mono_hmd *emh,
               Eigen::Matrix4f pose,
               std::vector<object_blob_t> *blobs)
{}
void
process_frame(struct exp_mono_hmd *emh,
uint32_t width,
......@@ -1046,6 +1057,8 @@ process_frame(struct exp_mono_hmd *emh,
// draw some debug info
// undist in red, rectified in cyan
if (emh->debug.rgb->cols > 0) {
for (uint32_t i = 0; i < emh->cur_blobs.size(); i++) {
blob_extent_t *b = &emh->cur_blobs.at(i);
......@@ -1121,20 +1134,6 @@ process_frame(struct exp_mono_hmd *emh,
if (abs(b->size.width * b->size.height) >
EXPMONO_BLOB_SIZE_THRESH) {
/*fprintf(emh->metadata_file,
"%d,%d,%f,%f,%d,%d,%d,%d,%d,%d,%d,%d,%"
"d,%d\n",
frame_id, i,
b->rectified_angle_from_centroid,
b->rectified_rect_angle,
num_blobs_lt, num_blobs_rt,
num_blobs_up, num_blobs_dn,
num_blobs_simangle_lt,
num_blobs_simangle_rt,
num_blobs_simangle_up,
num_blobs_simangle_dn,
blobs.size(), b->train_id);*/
if (b->train_id == 0) {
ignore = true;
}
......@@ -1143,47 +1142,27 @@ process_frame(struct exp_mono_hmd *emh,
data += std::to_string(num_blobs_up);
data += std::to_string(num_blobs_dn);
desc += std::to_string(b->train_id);
}
if (emh->create_sequence_data) {
char outfilename[128];
sprintf(outfilename, "/tmp/out_y%d.jpg", frame_id);
// cv::imwrite(outfilename,
// emh->frame_y);
// this set of angles for each of the
// 'lengthwise' extremities of the
// blobs should allow us to discern
// which is which, and should be a
// workable input to a neural network
}
// we can use the center of the blob.
// for now we can just rely on opencv
// but ideally we would write a rotating
// calipers implementation that would do
// this properly
// printf("\n");
}
}
if (emh->create_sequence_data && !ignore) {
char s[128];
sprintf(s, "%s,%s", data.c_str(), desc.c_str());
emh->tempdb.push_back(s);
for (uint32_t j = 0; j < EXPMONO_MAX_BLOB_COUNT * 4; j++) {
if (j >= data.size()) {
for (uint32_t i = 0; i < EXPMONO_MAX_BLOB_COUNT * 4; i++) {
if (i >= data.size()) {
fprintf(emh->raw_file, "-1,");
} else {
fprintf(emh->raw_file, "%c,", (data[j]));
fprintf(emh->raw_file, "%c,", (data[i]));
}
}
for (uint32_t j = 0; j < 6; j++) {
if (j >= desc.size()) {
for (uint32_t i = 0; i < 6; i++) {
if (i >= desc.size()) {
fprintf(emh->raw_file, "-1,");
} else {
fprintf(emh->raw_file, "%c,", (desc[j]));
fprintf(emh->raw_file, "%c,", (desc[i]));
}
}
fprintf(emh->raw_file, "\n");
......@@ -1194,8 +1173,8 @@ process_frame(struct exp_mono_hmd *emh,
if (train) {
// solve point correspondences with the trained ids
for (uint32_t j = 0; j < emh->cur_blobs.size(); j++) {
blob_extent_t *b = &emh->cur_blobs.at(j);
for (uint32_t i = 0; i < emh->cur_blobs.size(); i++) {
blob_extent_t *b = &emh->cur_blobs.at(i);
object_blob_t ob;
ob.model_rect = b->train_id - 1;
ob.screen_pos_center = b->undist_rect_center;
......@@ -1251,7 +1230,7 @@ process_frame(struct exp_mono_hmd *emh,
printf("FOUND %d DESC: %s for DATA %s\n", found_count,
iter->second.c_str(), data.c_str());
// construct our ob_blobs
for (uint32_t j = 0; j < found_count; j++) {
for (uint32_t i = 0; i < found_count; i++) {
int c = 0;
ob_blobs.clear();
for (std::string::iterator it =
......@@ -1267,9 +1246,10 @@ process_frame(struct exp_mono_hmd *emh,
printf(
"MATCH %d ASSOC: "
"%d with %f %f\n",
j, blob_id, ob.screen_pos_center.x,
i, blob_id, ob.screen_pos_center.x,
ob.screen_pos_center.y);
ob.blob = *b;
ob.blob.blob_id = blob_id;
ob_blobs.push_back(ob);
c++;
}
......@@ -1281,64 +1261,114 @@ process_frame(struct exp_mono_hmd *emh,
}
iter++;
}
if (found_count == 1) {
if (emh->debug.rgb->cols > 0) {
for (uint32_t j = 0;
j < ob_blobs.size(); j++) {
char label[32];
sprintf(label, "%d", j);
cv::putText(
emh->debug.rgb[0], label,
ob_blobs[j]
.screen_pos_center,
cv::FONT_HERSHEY_SIMPLEX,
1.0,
cv::Scalar(128, 128, 128));
// if (emh->last_blobs.size() == ob_blobs.size()) {
// we have the same number of blobs as the
// previous frame, it is likely we can resolve
// their ids based on distance
for (uint32_t i = 0; i < ob_blobs.size(); i++) {
object_blob_t *b = &ob_blobs.at(i);
int closest_blob_id = -1;
float closest_blob_dist = 65535.0f;
for (uint32_t j = 0; j < emh->last_blobs.size();
j++) {
blob_extent_t *lb =
&emh->last_blobs.at(j).blob;
float blob_dist = magnitudeVec2(
b->blob.undist_rect_center -
lb->undist_rect_center);
if (blob_dist < closest_blob_dist) {
closest_blob_dist = blob_dist;
closest_blob_id = j;
}
}
if (closest_blob_dist < 40) {
emh->blob_tracks[b->blob.blob_id]
.push_back(
b->blob.undist_rect_center);
b->blob.blob_id = closest_blob_id;
}
}
}
//}
// if (found_count == 1) {
if (emh->debug.rgb->cols > 0) {
for (uint32_t i = 0; i < EXPMONO_MAX_BLOB_COUNT; i++) {
cv::Point2f last;
for (uint32_t j = 0;
j < emh->blob_tracks[i].size(); j++) {
if (j == 0) {
if (emh->calibration && emh->calibration_count < 10 &&
found_count == 1 && ob_blobs.size() > 4) {
// we can collect a point
// correspondence to do
// calibration
std::vector<cv::Vec2f> image_points;
std::vector<cv::Vec3f> obj_points;
for (uint32_t j = 0; j < ob_blobs.size(); j++) {
object_blob_t *ob = &ob_blobs.at(j);
image_points.push_back(
ob->screen_pos_center);
obj_points.push_back(
emh->model_rects[ob->model_rect].c);
last = emh->blob_tracks[i][j];
}
cv::line(emh->debug.rgb[0], last,
emh->blob_tracks[i][j],
cv::Scalar(96, 160, 96));
last = emh->blob_tracks[i][j];
}
emh->calib_image_points.push_back(image_points);
emh->calib_object_points.push_back(obj_points);
emh->calibration_count += 1;
printf(
"COLLECTING CALIBRATION "
"%d\n",
emh->calibration_count);
}
if (emh->calibration_count > 9) {
emh->calibration = false;
cv::calibrateCamera(
emh->calib_object_points,
emh->calib_image_points, cv::Size(640, 480),
emh->intrinsic, emh->distortion,
cv::noArray(), cv::noArray());
std::cout
<< "CALIB INTRINSICS: " << emh->intrinsic
<< "\n"
<< "CALIB DIST: " << emh->distortion
<< "\n";
for (uint32_t i = 0; i < ob_blobs.size(); i++) {
char label[32];
sprintf(label, "%d", ob_blobs[i].blob.blob_id);
cv::putText(emh->debug.rgb[0], label,
ob_blobs[i].screen_pos_center,
cv::FONT_HERSHEY_SIMPLEX, 1.0,
cv::Scalar(128, 128, 128));
}
for (uint32_t i = 0; i < ob_blobs.size(); i++) {
char label[32];
sprintf(label, "%d", ob_blobs[i].blob.blob_id);
cv::putText(emh->debug.rgb[0], label,
ob_blobs[i].screen_pos_center,
cv::FONT_HERSHEY_SIMPLEX, 1.0,
cv::Scalar(128, 128, 128));
}
}
//}
emh->last_blobs = ob_blobs;
if (emh->calibration && emh->calibration_count < 10 &&
found_count == 1 && ob_blobs.size() > 4) {
// we can collect a point
// correspondence to do
// calibration
std::vector<cv::Vec2f> image_points;
std::vector<cv::Vec3f> obj_points;
for (uint32_t i = 0; i < ob_blobs.size(); i++) {
object_blob_t *ob = &ob_blobs.at(i);
image_points.push_back(ob->screen_pos_center);
obj_points.push_back(
emh->model_rects[ob->model_rect].c);
}
emh->calib_image_points.push_back(image_points);
emh->calib_object_points.push_back(obj_points);
emh->calibration_count += 1;
printf(
"COLLECTING CALIBRATION "
"%d\n",
emh->calibration_count);
}
if (emh->calibration_count > 9) {
emh->calibration = false;
cv::calibrateCamera(
emh->calib_object_points, emh->calib_image_points,
cv::Size(640, 480), emh->intrinsic, emh->distortion,
cv::noArray(), cv::noArray());
std::cout << "CALIB INTRINSICS: " << emh->intrinsic
<< "\n"
<< "CALIB DIST: " << emh->distortion << "\n";
}
}
}
void
recv_frame(struct xrt_frame_sink *xsink, struct xrt_frame *xf)
{
......@@ -1349,16 +1379,16 @@ recv_frame(struct xrt_frame_sink *xsink, struct xrt_frame *xf)
xf->stereo_format = XRT_STEREO_FORMAT_NONE;
parent->debug.refresh(xf);
// we receive YUYV frames from our camera, and split them into a
// 'luminance plane' and a 'combined chrominance' plane, for easier
// sampling
// 'luminance plane' and a 'combined chrominance' plane, for
// easier sampling
cv::Mat frame = cv::Mat(xf->height, xf->width, CV_8UC2, xf->data);
cv::Mat *channels[] = {&parent->frame_y, &parent->frame_uv};
cv::split(frame, channels[0]);
if (parent->create_sequence_debug_images) {
// we may wish to dump out raw images we capture from the camera
// for subsequent mark-up to generate training data (currently
// not done)
// we may wish to dump out raw images we capture from
// the camera for subsequent mark-up to generate
// training data (currently not done)
char img_file_name[64];
sprintf(img_file_name, "/tmp/exp_mono_%dx%d_%04d.yuyv",
xf->width, xf->height, parent->image_sequence_id);
......@@ -1442,6 +1472,13 @@ run_sequence_debug(void *_emh)
int f_height = 480;
while (1) {
// clear our blob traces
for (uint32_t i = 0; i < EXPMONO_MAX_BLOB_COUNT; i++) {
emh->blob_tracks[i].clear();
}
for (uint32_t i = 0; i < seq_count; i++) {
sprintf(img_file_name,
"/tmp/"
......@@ -1457,7 +1494,7 @@ run_sequence_debug(void *_emh)
fr.data = (uint8_t *)malloc(fr.size);
FILE *f = fopen(img_file_name, "r");
if (f) {
fread(fr.data, 640 * 480 * 2, 1, f);
fread(fr.data, fr.size, 1, f);
fclose(f);
recv_frame(&emh->frame_sink, &fr);
usleep(30000);
......@@ -1498,8 +1535,8 @@ exp_mono_hmd_create(struct xrt_prober *xp)
pthread_mutex_init(&emh->process_lock, NULL);
// Default Setup info - this driver is not currently associated with a
// specific HMD.
// Default Setup info - this driver is not currently associated
// with a specific HMD.
struct u_device_simple_info info;
info.display.w_pixels = 1920;
info.display.h_pixels = 1080;
......@@ -1626,8 +1663,8 @@ exp_mono_hmd_create(struct xrt_prober *xp)
// analyse them with the tags encoded in the
// colour blobs
emh->load_training_images = false;
// create sequence data - write out our 'training' as a header
// file populating the map
// create sequence data - write out our 'training' as a
// header file populating the map
emh->create_sequence_data = false;
// we can dump a live video sequence as images
......@@ -1638,9 +1675,9 @@ exp_mono_hmd_create(struct xrt_prober *xp)
emh->use_sequence_debug_images = true;
// we can attempt to calibrate camera intrinsics and distortion
// using the HMD itself as a calibration target, rather than a
// chessboard.
// we can attempt to calibrate camera intrinsics and
// distortion using the HMD itself as a calibration
// target, rather than a chessboard.
emh->calibration = false;
emh->calibration_count = 0;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment