...
 
Commits (46)
......@@ -24,10 +24,9 @@ find_package(Vulkan REQUIRED)
find_package(OpenGL REQUIRED COMPONENTS GLX)
find_package(HIDAPI)
find_package(OpenHMD)
find_package(OpenCV)
# @TODO Turn into a find_package LIBUSB-1.0 file.
pkg_check_modules(LIBUSB REQUIRED libusb-1.0)
find_package(OpenCV COMPONENTS core calib3d highgui imgproc imgcodecs features2d video)
find_package(Libusb1)
find_package(JPEG)
# @TODO Turn into a find_package LIBUVC file.
pkg_check_modules(LIBUVC libuvc)
......@@ -35,6 +34,7 @@ pkg_check_modules(LIBUVC libuvc)
# @TODO Turn into a find_package FFMPEG file.
pkg_check_modules(FFMPEG libavcodec)
find_package(uvbi)
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
# Compositor backend
......@@ -51,6 +51,8 @@ cmake_dependent_option(BUILD_WITH_XLIB "Enable xlib support" ON "X11_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_OPENGL "Enable OpenGL Graphics API support" ON "OPENGL_FOUND" OFF)
set(BUILD_WITH_LIBUSB TRUE)
cmake_dependent_option(BUILD_WITH_JPEG "Enable jpeg code (used for some video drivers)" ON "JPEG_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_UVBI "Enable UVBI-based optical tracking driver" ON "LIBUVC_FOUND AND uvbi_FOUND AND OPENCV_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_OPENCV "Enable OpenCV backend" ON "OpenCV_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_LIBUVC "Enable libuvc video driver" ON "LIBUVC_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_FFMPEG "Enable ffmpeg testing video driver" ON "FFMPEG_FOUND" OFF)
......@@ -83,6 +85,10 @@ if(BUILD_WITH_OPENCV)
add_definitions(-DXRT_HAVE_OPENCV)
endif()
if(BUILD_WITH_JPEG)
add_definitions(-DXRT_HAVE_JPEG)
endif()
if(BUILD_WITH_LIBUVC)
add_definitions(-DXRT_HAVE_LIBUVC)
endif()
......@@ -115,6 +121,17 @@ if(TRUE)
set(BUILD_DRIVER_PSMV TRUE)
endif()
if(BUILD_WITH_UVBI)
add_definitions(-DXRT_HAVE_UVBI)
endif()
if(BUILD_WITH_OPENCV AND (BUILD_WITH_FFMPEG OR BUILD_WITH_JPEG))
# Condition for enabling the montrack optical tracking driver.
# JPEG required for both UVC and v4l2 backends.
add_definitions(-DXRT_BUILD_MONTRACK)
set(BUILD_DRIVER_MONTRACK TRUE)
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic -Wall -Wextra -Wno-unused-parameter")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wno-unused-parameter")
......
# - try to find libusb-1 library
#
# Cache Variables: (probably not for direct use in your scripts)
#  LIBUSB1_LIBRARY
#  LIBUSB1_INCLUDE_DIR
#
# Non-cache variables you should use in your CMakeLists.txt:
#  LIBUSB1_LIBRARIES
#  LIBUSB1_INCLUDE_DIRS
#  LIBUSB1_FOUND - if this is not true, do not attempt to use this library
#
# Requires these CMake modules:
#  ProgramFilesGlob
#  FindPackageHandleStandardArgs (known included with CMake >=2.6.2)
#
# Original Author:
# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
# http://academic.cleardefinition.com
# Iowa State University HCI Graduate Program/VRAC
#
# Copyright Iowa State University 2009-2010.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

set(LIBUSB1_ROOT_DIR
	"${LIBUSB1_ROOT_DIR}"
	CACHE
	PATH
	"Root directory to search for libusb-1")

if(WIN32)
	include(ProgramFilesGlob)
	program_files_fallback_glob(_dirs "LibUSB-Win32")
	if(CMAKE_SIZEOF_VOID_P EQUAL 8)
		if(MSVC)
			set(_lib_suffixes lib/msvc_x64 MS64/static)
		endif()
	else()
		if(MSVC)
			set(_lib_suffixes lib/msvc MS32/static)
		# Fixed: was COMPILER_IS_GNUCXX, which is never defined by CMake;
		# the GCC lib suffix branch could therefore never be taken.
		elseif(CMAKE_COMPILER_IS_GNUCXX)
			set(_lib_suffixes lib/gcc)
		endif()
	endif()
else()
	set(_lib_suffixes)
	# Use pkg-config hints on non-Windows platforms when available.
	find_package(PkgConfig QUIET)
	if(PKG_CONFIG_FOUND)
		pkg_check_modules(PC_LIBUSB1 libusb-1.0)
	endif()
endif()

find_path(LIBUSB1_INCLUDE_DIR
	NAMES
	libusb.h
	PATHS
	${PC_LIBUSB1_INCLUDE_DIRS}
	${PC_LIBUSB1_INCLUDEDIR}
	${_dirs}
	HINTS
	"${LIBUSB1_ROOT_DIR}"
	PATH_SUFFIXES
	include/libusb-1.0
	include
	libusb-1.0)

find_library(LIBUSB1_LIBRARY
	NAMES
	libusb-1.0
	usb-1.0
	PATHS
	${PC_LIBUSB1_LIBRARY_DIRS}
	${PC_LIBUSB1_LIBDIR}
	${_dirs}
	HINTS
	"${LIBUSB1_ROOT_DIR}"
	PATH_SUFFIXES
	${_lib_suffixes})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Libusb1
	DEFAULT_MSG
	LIBUSB1_LIBRARY
	LIBUSB1_INCLUDE_DIR)

if(LIBUSB1_FOUND)
	set(LIBUSB1_LIBRARIES "${LIBUSB1_LIBRARY}")
	set(LIBUSB1_INCLUDE_DIRS "${LIBUSB1_INCLUDE_DIR}")
	mark_as_advanced(LIBUSB1_ROOT_DIR)
endif()

mark_as_advanced(LIBUSB1_INCLUDE_DIR LIBUSB1_LIBRARY)
......@@ -8,6 +8,7 @@ set(MATH_SOURCE_FILES
math/m_hash.cpp
math/m_optics.c
math/m_quatexpmap.cpp
math/m_track.c
)
set(OS_SOURCE_FILES
......
......@@ -307,6 +307,19 @@ math_compute_fovs(double w_total,
double vertfov_total,
struct xrt_fov *fov);
void
math_euler_to_quat(struct xrt_vec3 euler, struct xrt_quat* q);
int
math_min(int a, int b);
int
math_max(int a, int b);
float math_quat_magnitude(struct xrt_quat q);
float math_distance(struct xrt_vec3 a, struct xrt_vec3 b);
#ifdef __cplusplus
}
#endif
......@@ -424,7 +424,7 @@ math_relation_openxr_locate(const struct xrt_pose* space_pose,
math_relation_accumulate_relation(relative_relation,
&accumulating_relation);
// Apply the space pose.
// Apply the space pose.
math_relation_accumulate_transform(&spc, &accumulating_relation);
*result = accumulating_relation;
......
......@@ -110,3 +110,16 @@ position(struct xrt_pose& pose)
{
return map_vec3(pose.position);
}
/*!
 * @brief Wrap an internal 4x4 matrix struct in an Eigen type, non-const
 * overload.
 *
 * Permits zero-overhead manipulation of `xrt_matrix_4x4&` by Eigen routines as
 * if it were a `Eigen::Matrix4f&`.
 */
static inline Eigen::Map<Eigen::Matrix4f>
map_mat4x4(struct xrt_matrix_4x4& m)
{
	// Use the fixed-size 4x4 map, matching the documented Matrix4f
	// contract; the original mapped a dynamic MatrixXf(m.v, 4, 4), which
	// carries runtime dimensions and a different Eigen type. Both are
	// column-major float maps, so element access is unchanged.
	return Eigen::Map<Eigen::Matrix4f>(m.v);
}
// Copyright 2019, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Functions related to tracking.
* @author Pete Black <pete.black@collabora.com>
* @ingroup aux_math
*/
#include "m_api.h"
#include "util/u_debug.h"
#include <math.h>
#include <assert.h>
#include <stdio.h>
/*!
 * Convert an euler-angle rotation (radians) to a quaternion.
 *
 * The half-angle terms below read euler.z as yaw, euler.x as pitch and
 * euler.y as roll.
 *
 * NOTE(review): the product ordering in the component assignments differs
 * from the textbook ZYX (yaw-pitch-roll) formula — assumed intentional for
 * this codebase's axis conventions; verify against callers before reuse.
 */
void
math_euler_to_quat(struct xrt_vec3 euler, struct xrt_quat* q)
{
	// Half-angle sines/cosines for each axis.
	double c_yaw = cos(euler.z * 0.5);
	double s_yaw = sin(euler.z * 0.5);
	double c_pitch = cos(euler.x * 0.5);
	double s_pitch = sin(euler.x * 0.5);
	double c_roll = cos(euler.y * 0.5);
	double s_roll = sin(euler.y * 0.5);
	q->w = c_yaw * c_pitch * c_roll + s_yaw * s_pitch * s_roll;
	q->x = c_yaw * c_pitch * s_roll - s_yaw * s_pitch * c_roll;
	q->y = s_yaw * c_pitch * s_roll + c_yaw * s_pitch * c_roll;
	q->z = s_yaw * c_pitch * c_roll - c_yaw * s_pitch * s_roll;
}
/*!
 * Return the smaller of two ints.
 */
int
math_min(int a, int b)
{
	return (a > b) ? b : a;
}
/*!
 * Return the larger of two ints.
 */
int
math_max(int a, int b)
{
	return (a > b) ? a : b;
}
/*!
 * Euclidean norm of a quaternion's four components.
 */
float math_quat_magnitude(struct xrt_quat q)
{
	// Same summation order as before to keep float results bit-identical.
	float sum_sq = q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w;
	return sqrt(sum_sq);
}
/*!
 * Euclidean distance between two 3D points.
 */
float
math_distance(struct xrt_vec3 a, struct xrt_vec3 b)
{
	// Component-wise difference, then the norm; same arithmetic order as
	// the original so float results are bit-identical.
	float dx = a.x - b.x;
	float dy = a.y - b.y;
	float dz = a.z - b.z;
	return sqrt(dx * dx + dy * dy + dz * dz);
}
......@@ -214,12 +214,12 @@ vk_swapchain_select_extent(struct vk_swapchain *sc,
/*!
 * Destroy the image views and the old swapchain handle.
 *
 * Fixed: the previous text destroyed every image view and the swapchain
 * twice (duplicated loop and duplicated vkDestroySwapchainKHR call), which
 * is invalid Vulkan usage — handles must be destroyed exactly once.
 */
static void
vk_swapchain_destroy_old(struct vk_swapchain *sc, VkSwapchainKHR old)
{
	for (uint32_t i = 0; i < sc->image_count; i++) {
		sc->vk->vkDestroyImageView(sc->vk->device, sc->buffers[i].view,
		                           NULL);
	}

	sc->vk->vkDestroySwapchainKHR(sc->vk->device, old, NULL);
}
VkResult
......
......@@ -5,8 +5,13 @@
include_directories(
${CMAKE_CURRENT_SOURCE_DIR}/../include
${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
${CMAKE_CURRENT_SOURCE_DIR}/montrack
${CMAKE_CURRENT_SOURCE_DIR}/montrack/frameservers/common
)
if(BUILD_DRIVER_MONTRACK)
add_subdirectory(montrack)
endif()
if(BUILD_DRIVER_HDK)
set(HDK_SOURCE_FILES
......
# Sources for the monocular-tracking ("montrack") driver entry points.
set(MONTRACK_SOURCE_FILES
	mt_device.c
	mt_device.h
	mt_interface.h
	mt_prober.c
	mt_events.h
	mt_framequeue.h
	mt_framequeue.c
	)

add_subdirectory(frameservers)
add_subdirectory(filters)
add_subdirectory(optical_tracking)

# Use OBJECT to not create a archive, since it just gets in the way.
add_library(drv_montrack OBJECT ${MONTRACK_SOURCE_FILES}
	$<TARGET_OBJECTS:frameserver>
	$<TARGET_OBJECTS:filter>
	$<TARGET_OBJECTS:optical_tracking>
	)
set_property(TARGET drv_montrack PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(drv_montrack SYSTEM
	PRIVATE frameservers
	PRIVATE optical_tracking
	PRIVATE filters)
# PRIVATE keyword added: the keyword-less target_link_libraries signature has
# legacy semantics and should not be mixed with keyworded calls.
target_link_libraries(drv_montrack PRIVATE frameserver filter optical_tracking)
# Copyright 2019, Collabora, Ltd.
# SPDX-License-Identifier: BSL-1.0

include_directories(
	${CMAKE_CURRENT_SOURCE_DIR}/../include
	${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
	${CMAKE_CURRENT_SOURCE_DIR}
	)

set(FILTER_SOURCE_FILES
	common/filter.h
	common/filter.c
	common/measurementqueue.h
	common/measurementqueue.c
	filter_opencv_kalman.cpp
	filter_opencv_kalman.h
	filter_complementary.c
	filter_complementary.h
	)

# Use OBJECT to not create a archive, since it just gets in the way.
add_library(filter OBJECT ${FILTER_SOURCE_FILES})
set_property(TARGET filter PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(filter SYSTEM
	# Fixed: was "${CMAKE_CURRENT_SOURCE_DIR}../" (missing separator), which
	# expanded to a non-existent sibling path like ".../filters../".
	PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/..
	PRIVATE ${OpenCV_INCLUDE_DIRS}
	)
#include "filter.h"
#include "filter_complementary.h"
#include "filter_opencv_kalman.h"
#include <string.h>
#include "util/u_misc.h"
/*!
 * Create a filter instance of the given type.
 *
 * Allocates the generic wrapper, wires in the type-specific function
 * pointers, then creates the internal implementation and its measurement
 * queue. Returns NULL on allocation failure or for FILTER_TYPE_NONE /
 * unknown types. (Removed the unreachable `break` that followed the
 * `return NULL` in the default case.)
 */
filter_instance_t*
filter_create(filter_type_t t)
{
	filter_instance_t* i = U_TYPED_CALLOC(filter_instance_t);
	if (i == NULL) {
		return NULL;
	}
	switch (t) {
	case FILTER_TYPE_OPENCV_KALMAN:
		i->tracker_type = t;
		i->filter_configure = filter_opencv_kalman_configure;
		i->filter_get_state = filter_opencv_kalman_get_state;
		i->filter_predict_state = filter_opencv_kalman_predict_state;
		i->filter_set_state = filter_opencv_kalman_set_state;
		i->filter_queue = filter_opencv_kalman_queue;
		i->internal_instance = filter_opencv_kalman_create(i);
		i->measurement_queue = measurement_queue_create();
		break;
	case FILTER_TYPE_COMPLEMENTARY:
		i->tracker_type = t;
		i->filter_configure = filter_complementary_configure;
		i->filter_get_state = filter_complementary_get_state;
		i->filter_predict_state = filter_complementary_predict_state;
		i->filter_set_state = filter_complementary_set_state;
		i->filter_queue = filter_complementary_queue;
		i->internal_instance = filter_complementary_create(i);
		i->measurement_queue = measurement_queue_create();
		break;
	case FILTER_TYPE_NONE:
	default:
		// Unknown type: only the wrapper was allocated so far.
		free(i);
		return NULL;
	}
	return i;
}
/*!
 * Minimal smoke test: verify a kalman filter instance can be created.
 *
 * NOTE(review): the created instance is never released, so this test leaks
 * the wrapper, its internal instance and its measurement queue — consider
 * calling filter_destroy() here once its implementation is available.
 */
bool
filters_test()
{
	// create a filter
	filter_instance_t* filter = filter_create(FILTER_TYPE_OPENCV_KALMAN);
	if (!filter) {
		return false;
	}
	return true;
}
#ifndef FILTER_H
#define FILTER_H

#include <xrt/xrt_defines.h>
#include <../auxiliary/util/u_time.h>
#include <optical_tracking/common/tracker.h>
#include "measurementqueue.h"

// Opaque handles passed across the C filter interface; all plain void
// pointers, cast to concrete types inside each implementation.
typedef void* filter_instance_ptr;
typedef void* filter_internal_instance_ptr;
typedef void* filter_configuration_ptr;
typedef void* filter_state_ptr;

/*!
 * Fused state estimate produced by a filter.
 */
typedef struct filter_state
{
	struct xrt_pose pose;           // fused position + orientation
	bool has_position;              // pose.position is valid
	bool has_rotation;              // pose.orientation is valid
	struct xrt_vec3 rotation_euler; // orientation as euler angles
	struct xrt_vec3 velocity;
	struct xrt_vec3 acceleration;
	struct xrt_quat angular_velocity;
	struct xrt_quat angular_accel;
	int64_t timestamp;              // time this estimate refers to
} filter_state_t;

/*!
 * Available filter implementations; see filter_create().
 */
typedef enum filter_type
{
	FILTER_TYPE_NONE,
	FILTER_TYPE_OPENCV_KALMAN,
	FILTER_TYPE_COMPLEMENTARY
} filter_type_t;

/*!
 * Generic filter instance: a vtable of function pointers filled in by
 * filter_create(), plus the implementation-private state and the shared
 * measurement queue.
 */
typedef struct _filter_instance
{
	filter_type_t tracker_type;
	// Push a new measurement into the filter.
	bool (*filter_queue)(filter_instance_ptr inst,
	                     tracker_measurement_t* measurement);
	// Overwrite the filter's current state.
	bool (*filter_set_state)(filter_instance_ptr inst,
	                         filter_state_ptr state);
	// Read back the latest fused state.
	bool (*filter_get_state)(filter_instance_ptr inst,
	                         filter_state_ptr state);
	// Predict the state at a future time point.
	bool (*filter_predict_state)(filter_instance_ptr inst,
	                             filter_state_t* state,
	                             timepoint_ns time);
	// Apply an implementation-specific configuration struct.
	bool (*filter_configure)(filter_instance_ptr inst,
	                         filter_configuration_ptr config);
	filter_internal_instance_ptr internal_instance;
	measurement_queue_t* measurement_queue;
} filter_instance_t;

filter_instance_t*
filter_create(filter_type_t t);
bool
filter_destroy(filter_instance_t* inst);

// Basic self-test; returns true on success.
bool
filters_test();

#endif // FILTER_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <util/u_misc.h>
#include "measurementqueue.h"
/*!
 * Allocate and initialise a measurement queue with a fixed-capacity ring.
 *
 * Keeps the original "abort on allocation failure" policy but exits with a
 * failure status — the previous exit(0) reported success to the parent
 * process. Also checks the ring-item allocation, which was unchecked.
 */
measurement_queue_t* measurement_queue_create()
{
	measurement_queue_t* mq = U_TYPED_CALLOC(measurement_queue_t);
	if (!mq) {
		fprintf(stderr, "ERROR: could not malloc!\n");
		exit(EXIT_FAILURE);
	}
	mq->measurements.items = U_TYPED_ARRAY_CALLOC(
	    tracker_measurement_t, MEASUREMENTQUEUE_INITIAL_CAPACITY);
	if (!mq->measurements.items) {
		fprintf(stderr, "ERROR: could not malloc!\n");
		free(mq);
		exit(EXIT_FAILURE);
	}
	mq->measurements.capacity = MEASUREMENTQUEUE_INITIAL_CAPACITY;
	mq->measurements.size = 0;
	mq->measurements.head_index = 0;
	mq->measurements.tail_index = 0;
	mq->measurements.last_optical_index = 0;
	pthread_mutex_init(&mq->queue_lock, NULL);
	mq->source_id_counter = 0;
	mq->timestamp_inc = 0;
	return mq;
}
/*!
 * Tear down a queue created by measurement_queue_create().
 *
 * Now also destroys the mutex and frees the queue struct itself — the
 * original freed only the ring items, leaking the rest — and tolerates NULL.
 */
void measurement_queue_destroy(measurement_queue_t* mq)
{
	if (!mq) {
		return;
	}
	pthread_mutex_destroy(&mq->queue_lock);
	free(mq->measurements.items);
	free(mq);
}
/*!
 * Hand out a unique measurement-source id (producer side).
 *
 * The original left the locking commented out (and the unlock would have
 * been unreachable after the return anyway); take the lock around the
 * increment so concurrent producers cannot receive the same id.
 */
uint64_t measurement_queue_uniq_source_id(measurement_queue_t* mq)
{
	pthread_mutex_lock(&mq->queue_lock);
	uint64_t id = mq->source_id_counter++;
	pthread_mutex_unlock(&mq->queue_lock);
	return id;
}
/*!
 * Append a measurement to the ring buffer (producer side).
 *
 * Once the ring is full the oldest entry is overwritten and head advances.
 * The index of the newest optical measurement is cached so consumers can
 * find the last camera frame without scanning.
 */
void measurement_queue_add(measurement_queue_t* mq,tracker_measurement_t* tm)
{
	if (!mq) {
		return;
	}
	bool full = false;
	pthread_mutex_lock(&mq->queue_lock);
	mq->measurements.items[mq->measurements.tail_index] = *tm;
	if (tm->flags & MEASUREMENT_OPTICAL) {
		// cache: newest optical sample position, avoids a search later
		mq->measurements.last_optical_index = mq->measurements.tail_index;
	}
	mq->measurements.tail_index++;
	if (mq->measurements.tail_index == mq->measurements.capacity) {
		// wrap the tail around the ring
		mq->measurements.tail_index = 0;
	}
	if (mq->measurements.size >= mq->measurements.capacity) {
		full = true;
	}
	if (full) {
		// ring full: drop the oldest element by advancing head
		if ((mq->measurements.head_index + 1) == mq->measurements.capacity) {
			mq->measurements.head_index = 0;
		} else {
			mq->measurements.head_index++;}
	} else {
		mq->measurements.size++;
	}
	pthread_mutex_unlock(&mq->queue_lock);
}
//NOTE: we alloc here, caller must free.
uint32_t measurement_queue_get_since_last_frame(measurement_queue_t* mq,uint32_t source_id,tracker_measurement_t** cm){
pthread_mutex_lock(&mq->queue_lock);
//iterate over our measurements, count the number of records we will copy
//our starting timestamp is the one on our last frame
uint64_t ts = mq->measurements.items[mq->measurements.last_optical_index].source_timestamp;
uint32_t count=0;
for (uint32_t i=0;i<mq->measurements.size;i++)
if (mq->measurements.items[i].source_timestamp > ts) {
count++;
}
*cm = U_TYPED_ARRAY_CALLOC(tracker_measurement_t,count);
count =0;
for (uint32_t i=0;i<mq->measurements.size;i++) {
if (mq->measurements.items[i].source_timestamp > ts) {
memcpy(*cm+count,&mq->measurements.items[i],sizeof(tracker_measurement_t));
count++;
}
}
pthread_mutex_unlock(&mq->queue_lock);
return count;
}
/*!
 * Copy every measurement newer than timestamp_us (consumer side).
 *
 * Allocates *cm; the caller must free it. Returns the number of entries
 * copied. Scans the ring in two segments: [head, size) then [0, tail).
 *
 * NOTE(review): the first segment's bound is `size` rather than `capacity`
 * or `tail`, and source_id is unused — verify the intended ring traversal
 * before extending this; the count pass and copy pass at least agree.
 */
uint32_t measurement_queue_get_since_timestamp(measurement_queue_t* mq,uint32_t source_id,int64_t timestamp_us,tracker_measurement_t** cm) {
	pthread_mutex_lock(&mq->queue_lock);
	//iterate over our measurements, count the number of records we will copy
	//our starting timestamp is the one on our last frame
	uint32_t count=0;
	for (uint32_t i=mq->measurements.head_index;i<mq->measurements.size;i++) {
		if (mq->measurements.items[i].source_timestamp > timestamp_us) {
			count++;
		}
	}
	for (uint32_t i=0;i<mq->measurements.tail_index;i++) {
		if (mq->measurements.items[i].source_timestamp > timestamp_us) {
			count++;
		}
	}
	*cm = U_TYPED_ARRAY_CALLOC(tracker_measurement_t,count);
	count =0;
	// second pass mirrors the first, copying the matching entries in order
	for (uint32_t i=mq->measurements.head_index;i<mq->measurements.size;i++) {
		if (mq->measurements.items[i].source_timestamp > timestamp_us) {
			memcpy(*cm+count,&mq->measurements.items[i],sizeof(tracker_measurement_t));
			count++;
		}
	}
	for (uint32_t i=0;i<mq->measurements.tail_index;i++) {
		if (mq->measurements.items[i].source_timestamp > timestamp_us) {
			memcpy(*cm+count,&mq->measurements.items[i],sizeof(tracker_measurement_t));
			count++;
		}
	}
	pthread_mutex_unlock(&mq->queue_lock);
	return count;
}
// Header guard renamed from the generic VECTOR_H, which risked colliding
// with any other header using the same common guard name.
#ifndef MEASUREMENTQUEUE_H
#define MEASUREMENTQUEUE_H

#include <pthread.h>
#include <optical_tracking/common/tracker.h>

#define MEASUREMENTQUEUE_INITIAL_CAPACITY 8
#define MAX_MEASUREMENT_SOURCES 32

/*!
 * Fixed-capacity ring buffer of tracker measurements.
 */
typedef struct measurement_ring {
	tracker_measurement_t* items;
	uint32_t size;               // number of valid entries
	uint32_t capacity;           // allocated length of items
	uint32_t head_index;         // oldest entry
	uint32_t tail_index;         // next write position
	uint32_t last_optical_index; //optimisation to avoid search
} measurement_ring_t;

/*!
 * Thread-safe measurement queue shared between producers (drivers) and
 * consumers (filters).
 */
typedef struct measurment_queue {
	measurement_ring_t measurements;
	uint64_t source_id_counter; // next id handed out by uniq_source_id
	uint64_t timestamp_inc;
	pthread_mutex_t queue_lock; // guards all of the above
} measurement_queue_t;

measurement_queue_t* measurement_queue_create();
void measurement_queue_destroy(measurement_queue_t* mq);
//bool measurement_queue_get_latest_n(measurement_queue_t* mq,uint32_t source_id, uint32_t* count,tracker_measurement_t* cm); //used by consumers
uint32_t measurement_queue_get_since_last_frame(measurement_queue_t* mq,uint32_t source_id,tracker_measurement_t** cm);
uint32_t measurement_queue_get_since_timestamp(measurement_queue_t* mq,uint32_t source_id,int64_t timestamp_us,tracker_measurement_t** cm);
void measurement_queue_add(measurement_queue_t* mq, tracker_measurement_t* m); //used by producers
uint64_t measurement_queue_uniq_source_id(measurement_queue_t* mq); //used by producers

#endif // MEASUREMENTQUEUE_H
#include "filter_complementary.h"
#include "util/u_misc.h"
// Internal state for the complementary filter.
struct filter_complementary_instance_t
{
	bool configured; // configure() has been called
	filter_complementary_configuration_t configuration;
	filter_state_t last_state; // state at the previous update
	filter_state_t state;      // latest fused state
	float gyro_x_bias,gyro_y_bias,gyro_z_bias; // running gyro drift estimates
	uint16_t avg_count; // samples folded into the bias running averages
	float alpha;        // NOTE(review): never referenced in this file
	bool running;       // set once at least one measurement was queued
};

/*!
 * Casts the internal instance pointer from the generic opaque type to our
 * complementary-filter internal type.
 */
static inline filter_complementary_instance_t*
filter_complementary_instance(filter_internal_instance_ptr ptr)
{
	return (filter_complementary_instance_t*)ptr;
}
/*!
 * Destroy hook for the complementary filter.
 *
 * Renamed from filter_complementary__destroy (double underscore, a typo):
 * the header declares filter_complementary_destroy, so the old spelling left
 * that declaration without a definition. Still frees nothing and reports
 * failure, exactly as before.
 */
bool
filter_complementary_destroy(filter_instance_t* inst)
{
	// do nothing
	return false;
}
/*!
 * Push a measurement onto the shared queue and mark the filter live.
 */
bool
filter_complementary_queue(filter_instance_t* inst,
                           tracker_measurement_t* measurement)
{
	measurement_queue_add(inst->measurement_queue, measurement);
	filter_complementary_instance_t* self =
	    filter_complementary_instance(inst->internal_instance);
	self->running = true;
	return true;
}
/*!
 * Fuse all measurements received since last_state.timestamp and report the
 * resulting state.
 *
 * Optical/position measurements are copied straight into the state; IMU
 * measurements integrate the gyro with accelerometer-derived roll/pitch as
 * the complementary reference, estimating gyro bias while the device looks
 * stationary (|accel| ~ 1 g).
 *
 * Fixed: the early `return false` on a too-large dt leaked the array
 * allocated by measurement_queue_get_since_timestamp(); it is now freed on
 * every exit path.
 */
bool
filter_complementary_get_state(filter_instance_t* inst, filter_state_t* state)
{
	filter_complementary_instance_t* internal =
	    filter_complementary_instance(inst->internal_instance);
	if (!internal->running) {
		return false;
	}
	tracker_measurement_t* measurement_array;
	uint32_t count = measurement_queue_get_since_timestamp(
	    inst->measurement_queue, 0, internal->last_state.timestamp,
	    &measurement_array);
	float one_minus_bias = 1.0f - internal->configuration.bias;
	int avg_max = 8; // window length for the gyro-bias running average
	for (uint32_t i = 0; i < count; i++) {
		tracker_measurement_t* m = &measurement_array[i];
		if (m->flags & (MEASUREMENT_OPTICAL | MEASUREMENT_POSITION)) {
			//we can just stuff our position into our state for now, ignoring timestamps.
			internal->state.pose.position = m->pose.position;
			internal->state.pose.orientation = m->pose.orientation;
			internal->state.timestamp = m->source_timestamp;
			internal->last_state = internal->state;
		}
		if (m->flags & (MEASUREMENT_IMU | MEASUREMENT_RAW_ACCEL | MEASUREMENT_RAW_GYRO)) {
			// timestamps are microseconds; dt in seconds
			float dt = (m->source_timestamp - internal->last_state.timestamp) * 0.000001;
			if (dt > 1.0f) {
				internal->last_state.timestamp = m->source_timestamp;
				// was leaked here before returning
				free(measurement_array);
				return false; //this is our first frame, or something has gone wrong... big dt will blow up calculations.
			}
			float raw_mag_accel = sqrt(m->accel.x * m->accel.x + m->accel.y * m->accel.y + m->accel.z * m->accel.z);
			float mag_accel = raw_mag_accel * internal->configuration.accel_to_g;
			//determine roll/pitch angles with gravity as a reference
			float roll = atan2(-1.0f * m->accel.x, sqrt(m->accel.y * m->accel.y + m->accel.z * m->accel.z));
			float pitch = atan2(m->accel.y, m->accel.z);
			//assume that, if acceleration is only gravity, then any change in the gyro is drift and update compensation
			//if acceleration magnitude is close to 1.0f, we assume its just gravity, and can integrate our gyro reading
			//as drift compensation - we can assume controller is static at startup, and gather data then, or continuously
			//sample
			if (fabs(1.0f - mag_accel) < 0.05) { //looks like gravity only
				//fill up the running average count as fast as possible, but subsequently
				//only integrate measurements that are not outliers w/respect to the average
				if (internal->avg_count < avg_max || fabs(m->gyro.z - internal->gyro_z_bias) < (internal->gyro_z_bias / 4)) {
					if (internal->avg_count < avg_max) {
						internal->avg_count++;
					}
					internal->gyro_x_bias = internal->gyro_x_bias + (m->gyro.x - internal->gyro_x_bias) / math_min(internal->avg_count, avg_max);
					internal->gyro_y_bias = internal->gyro_y_bias + (m->gyro.y - internal->gyro_y_bias) / math_min(internal->avg_count, avg_max);
					internal->gyro_z_bias = internal->gyro_z_bias + (m->gyro.z - internal->gyro_z_bias) / math_min(internal->avg_count, avg_max);
				}
			}
			internal->state.rotation_euler.z = internal->last_state.rotation_euler.z + (m->gyro.z - internal->gyro_z_bias) * internal->configuration.gyro_to_radians_per_second * dt;
			//push back towards zero, as 'returning to 0 slowly' is better than 'just drift', probably
			internal->state.rotation_euler.z -= internal->state.rotation_euler.z * internal->configuration.drift_z_to_zero * dt;
			internal->state.rotation_euler.y = internal->last_state.rotation_euler.y + (one_minus_bias * ((m->gyro.y - internal->gyro_y_bias) * internal->configuration.gyro_to_radians_per_second * dt)) - internal->configuration.bias * (internal->last_state.rotation_euler.y - roll);
			internal->state.rotation_euler.x = internal->last_state.rotation_euler.x + (one_minus_bias * ((m->gyro.x - internal->gyro_x_bias) * internal->configuration.gyro_to_radians_per_second * dt)) - internal->configuration.bias * (internal->last_state.rotation_euler.x - pitch);
			//DISABLED FOR POS TRQCKER TESTING
			//internal->state.timestamp = m->source_timestamp;
			//internal->last_state = internal->state;
		}
	}
	//TODO: come up with a way to avoid alloc/free - use max length buffer?
	free(measurement_array);
	//convert to a quat for consumption as pose
	//removed for debugging pos tracking
	//math_euler_to_quat(internal->state.rotation_euler,&internal->state.pose.orientation);
	*state = internal->state;
	return true;
}
/*!
 * Overwrite the filter's state — not yet implemented, always reports failure.
 */
bool
filter_complementary_set_state(filter_instance_t* inst, filter_state_t* state)
{
	//TODO: implement this
	return false;
}
/*!
 * Predict the state at the given time.
 *
 * No forward prediction is implemented; this simply reports the latest
 * fused state, ignoring the requested time point.
 */
bool
filter_complementary_predict_state(filter_instance_t* inst,
                                   filter_state_t* state,
                                   timepoint_ns time)
{
	(void)time; // prediction not implemented
	return filter_complementary_get_state(inst, state);
}
/*!
 * Apply a complementary-filter configuration.
 *
 * Copies the caller's configuration, resets the gyro bias estimates and
 * marks the instance configured.
 */
bool
filter_complementary_configure(filter_instance_t* inst,
                               filter_configuration_ptr config_generic)
{
	filter_complementary_instance_t* self =
	    filter_complementary_instance(inst->internal_instance);
	self->configuration =
	    *(filter_complementary_configuration_t*)config_generic;
	self->gyro_x_bias = 0.0f;
	self->gyro_y_bias = 0.0f;
	self->gyro_z_bias = 0.0f;
	self->configured = true;
	return true;
}
/*!
 * Allocate and initialise a complementary-filter internal instance.
 *
 * Returns NULL on allocation failure.
 */
filter_complementary_instance_t*
filter_complementary_create(filter_instance_t* inst)
{
	filter_complementary_instance_t* i =
	    U_TYPED_CALLOC(filter_complementary_instance_t);
	if (i == NULL) {
		return NULL;
	}
	i->configured = false;
	i->running = false;
	// set up our state stuff that will not change.
	// we a are a rotational-only filter
	i->state.has_rotation = true;
	i->state.has_position = false;
	i->state.timestamp = 0;
	// U_TYPED_CALLOC zero-initialises everything, and last_state is copied
	// from state here anyway — the original's explicit zeroing of
	// last_state.rotation_euler before this copy was dead code, removed.
	i->last_state = i->state;
	return i;
}
#ifndef FILTER_COMPLEMENTARY_H
#define FILTER_COMPLEMENTARY_H

#include <xrt/xrt_defines.h>
#include "common/filter.h"
#include "common/measurementqueue.h"
#include <stdlib.h>

/*!
 * Tuning parameters for the complementary filter.
 */
typedef struct filter_complementary_configuration
{
	float bias;  // accel-vs-gyro blend weight used in the update
	float scale;
	uint64_t max_timestamp; //wrap timestamp here
	float accel_to_g;       // converts raw accel magnitude to g units
	float gyro_to_radians_per_second; // converts raw gyro to rad/s
	float drift_z_to_zero;  // rate at which yaw decays back toward zero
} filter_complementary_configuration_t;

#ifdef __cplusplus
extern "C" {
#endif

// forward declare this, as it contains C++ stuff
typedef struct filter_complementary_instance_t filter_complementary_instance_t;

filter_complementary_instance_t*
filter_complementary_create(filter_instance_t* inst);
bool
filter_complementary_destroy(filter_instance_t* inst);

bool
filter_complementary_queue(filter_instance_t* inst,
                           tracker_measurement_t* measurement);
bool
filter_complementary_get_state(filter_instance_t* inst, filter_state_t* state);
bool
filter_complementary_set_state(filter_instance_t* inst, filter_state_t* state);
bool
filter_complementary_predict_state(filter_instance_t* inst,
                                   filter_state_t*,
                                   timepoint_ns time);
bool
filter_complementary_configure(filter_instance_t* inst,
                               filter_configuration_ptr config);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // FILTER_COMPLEMENTARY_H
#include <opencv2/opencv.hpp>
#include "filter_opencv_kalman.h"
#include "util/u_misc.h"
// Internal state for the OpenCV-Kalman-based filter.
struct filter_opencv_kalman_instance_t
{
	bool configured; // configure() has been called
	opencv_filter_configuration_t configuration;
	cv::KalmanFilter kalman_filter;
	cv::Mat observation; // 3x1 measurement vector
	cv::Mat prediction;  // 6x1 predicted state vector
	cv::Mat state;
	bool running; // set once at least one measurement was queued
};

/*!
 * Casts the internal instance pointer from the generic opaque type to our
 * opencv_kalman internal type.
 */
static inline filter_opencv_kalman_instance_t*
filter_opencv_kalman_instance(filter_internal_instance_ptr ptr)
{
	return (filter_opencv_kalman_instance_t*)ptr;
}
/*!
 * Destroy hook for the OpenCV kalman filter.
 *
 * Renamed from filter_opencv_kalman__destroy (double underscore, a typo):
 * the header declares filter_opencv_kalman_destroy, so the old spelling left
 * that declaration without a definition. Still frees nothing and reports
 * failure, exactly as before.
 */
bool
filter_opencv_kalman_destroy(filter_instance_t* inst)
{
	// do nothing
	return false;
}
/*!
 * Push a measurement onto the shared queue and mark the filter live.
 *
 * Fixed: now returns true on the successful path — the measurement was
 * queued, but the original returned false, inconsistent with
 * filter_complementary_queue().
 */
bool
filter_opencv_kalman_queue(filter_instance_t* inst,
                           tracker_measurement_t* measurement)
{
	filter_opencv_kalman_instance_t* internal =
	    filter_opencv_kalman_instance(inst->internal_instance);
	printf("queueing measurement in filter\n");
	measurement_queue_add(inst->measurement_queue, measurement);
	internal->running = true;
	return true;
}
/*!
 * Read back the latest fused state — not yet implemented, always fails.
 */
bool
filter_opencv_kalman_get_state(filter_instance_t* inst, filter_state_t* state)
{
	return false;
}

/*!
 * Overwrite the filter's state — not yet implemented, always fails.
 */
bool
filter_opencv_kalman_set_state(filter_instance_t* inst, filter_state_t* state)
{
	return false;
}
/*!
 * Predict the filtered position via the Kalman filter's prediction step.
 *
 * Returns false until at least one measurement has been queued. Only the
 * position components of the state are filled in.
 *
 * Removed the unused local `measurement_array` — the queued measurements
 * are not consumed here yet (see the TODO-style comment retained below).
 */
bool
filter_opencv_kalman_predict_state(filter_instance_t* inst,
                                   filter_state_t* state,
                                   timepoint_ns time)
{
	filter_opencv_kalman_instance_t* internal =
	    filter_opencv_kalman_instance(inst->internal_instance);
	if (!internal->running) {
		return false;
	}
	//get all our measurements including the last optical frame,
	//and run our filter on them to make a prediction
	internal->prediction = internal->kalman_filter.predict();
	state->has_position = true;
	state->pose.position.x = internal->prediction.at<float>(0, 0);
	state->pose.position.y = internal->prediction.at<float>(1, 0);
	state->pose.position.z = internal->prediction.at<float>(2, 0);
	return true;
}
/*!
 * Apply a kalman-filter configuration.
 *
 * Stores the configuration and pushes the process/measurement noise
 * covariances into the underlying cv::KalmanFilter.
 */
bool
filter_opencv_kalman_configure(filter_instance_t* inst,
                               filter_configuration_ptr config_generic)
{
	filter_opencv_kalman_instance_t* self =
	    filter_opencv_kalman_instance(inst->internal_instance);
	opencv_filter_configuration_t* cfg =
	    (opencv_filter_configuration_t*)config_generic;
	self->configuration = *cfg;
	cv::setIdentity(self->kalman_filter.processNoiseCov,
	                cv::Scalar::all(cfg->process_noise_cov));
	cv::setIdentity(self->kalman_filter.measurementNoiseCov,
	                cv::Scalar::all(cfg->measurement_noise_cov));
	self->configured = true;
	return true;
}
/*!
 * Allocate and initialise the OpenCV Kalman internal instance.
 *
 * State is 6-D (position + velocity), measurement is 3-D (position); the
 * transition matrix encodes constant-velocity motion with dt fixed at 1.0.
 * Returns NULL on allocation failure.
 *
 * NOTE(review): processNoiseCov/measurementNoiseCov are seeded from
 * i->configuration, which is still zero-initialised at this point — they
 * only get meaningful values once filter_opencv_kalman_configure() runs.
 */
filter_opencv_kalman_instance_t*
filter_opencv_kalman_create(filter_instance_t* inst)
{
	filter_opencv_kalman_instance_t* i =
	    U_TYPED_CALLOC(filter_opencv_kalman_instance_t);
	if (i) {
		float dt = 1.0;
		i->kalman_filter.init(6, 3);
		i->observation = cv::Mat(3, 1, CV_32F);
		i->prediction = cv::Mat(6, 1, CV_32F);
		// Row-major literal: [I | dt*I] over [0 | I] — constant velocity.
		i->kalman_filter.transitionMatrix =
		    (cv::Mat_<float>(6, 6) << 1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0,
		     1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0,
		     0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
		     0.0, 0.0, 0.0, 0.0, 1.0);
		cv::setIdentity(i->kalman_filter.measurementMatrix,
		                cv::Scalar::all(1.0f));
		cv::setIdentity(i->kalman_filter.errorCovPost,
		                cv::Scalar::all(0.0f));
		// our filter parameters set the process and measurement noise
		// covariances.
		cv::setIdentity(
		    i->kalman_filter.processNoiseCov,
		    cv::Scalar::all(i->configuration.process_noise_cov));
		cv::setIdentity(
		    i->kalman_filter.measurementNoiseCov,
		    cv::Scalar::all(i->configuration.measurement_noise_cov));
		i->configured = false;
		i->running = false;
		return i;
	}
	return NULL;
}
#ifndef FILTER_OPENCV_KALMAN_H
#define FILTER_OPENCV_KALMAN_H

#include <xrt/xrt_defines.h>
#include "common/filter.h"
#include "common/measurementqueue.h"

/*!
 * Tuning parameters for the OpenCV kalman filter.
 */
typedef struct opencv_filter_configuration
{
	float measurement_noise_cov; // measurement noise covariance (diagonal)
	float process_noise_cov;     // process noise covariance (diagonal)
} opencv_filter_configuration_t;

#ifdef __cplusplus
extern "C" {
#endif

// forward declare this, as it contains C++ stuff
typedef struct filter_opencv_kalman_instance_t filter_opencv_kalman_instance_t;

filter_opencv_kalman_instance_t*
filter_opencv_kalman_create(filter_instance_t* inst);
bool
filter_opencv_kalman_destroy(filter_instance_t* inst);

bool
filter_opencv_kalman_queue(filter_instance_t* inst,
                           tracker_measurement_t* measurement);
bool
filter_opencv_kalman_get_state(filter_instance_t* inst, filter_state_t* state);
bool
filter_opencv_kalman_set_state(filter_instance_t* inst, filter_state_t* state);
bool
filter_opencv_kalman_predict_state(filter_instance_t* inst,
                                   filter_state_t*,
                                   timepoint_ns time);
bool
filter_opencv_kalman_configure(filter_instance_t* inst,
                               filter_configuration_ptr config);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // FILTER_OPENCV_KALMAN_H
# Copyright 2019, Collabora, Ltd.
# SPDX-License-Identifier: BSL-1.0

include_directories(
	${CMAKE_CURRENT_SOURCE_DIR}/../include
	${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
	${CMAKE_CURRENT_SOURCE_DIR}
	)

set(FRAMESERVER_SOURCE_FILES
	common/frameserver.c
	common/frameserver.h
	)

if(BUILD_WITH_FFMPEG)
	list(APPEND FRAMESERVER_SOURCE_FILES
		ffmpeg/ffmpeg_frameserver.c
		ffmpeg/ffmpeg_frameserver.h
		)
endif()

if(BUILD_WITH_LIBUVC AND BUILD_WITH_JPEG)
	list(APPEND FRAMESERVER_SOURCE_FILES
		uvc/uvc_frameserver.c
		uvc/uvc_frameserver.h
		)
endif()

if(BUILD_WITH_JPEG)
	list(APPEND FRAMESERVER_SOURCE_FILES
		v4l2/v4l2_frameserver.c
		v4l2/v4l2_frameserver.h
		)
endif()

# Use OBJECT to not create a archive, since it just gets in the way.
add_library(frameserver OBJECT ${FRAMESERVER_SOURCE_FILES})
set_property(TARGET frameserver PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(frameserver SYSTEM
	PRIVATE
	${CMAKE_CURRENT_SOURCE_DIR}/..
	)

if(BUILD_WITH_LIBUVC AND BUILD_WITH_JPEG)
	target_include_directories(frameserver SYSTEM
		PRIVATE
		# Fixed: was ${libuvc_INCLUDE_DIRS}; the pkg_check_modules()
		# prefix used at the top level is upper-case LIBUVC, so the
		# lower-case variable was always empty.
		${LIBUVC_INCLUDE_DIRS}
		${LIBUSB1_INCLUDE_DIRS}
		)
endif()

if(BUILD_WITH_JPEG)
	target_include_directories(frameserver SYSTEM
		PRIVATE
		${JPEG_INCLUDE_DIRS}
		)
endif()
#include "frameserver.h"
#ifdef XRT_HAVE_FFMPEG
#include "ffmpeg/ffmpeg_frameserver.h"
#endif // XRT_HAVE_FFMPEG
#ifdef XRT_HAVE_LIBUVC
#include "uvc/uvc_frameserver.h"
#endif // XRT_HAVE_LIBUVC
#include "v4l2/v4l2_frameserver.h"
#include "util/u_misc.h"
/*
 * Return the bytes-per-pixel for the given pixel format.
 *
 * Returns a fractional value for planar/subsampled formats (1.5f for
 * YUV420) and -1.0f for formats with no fixed per-pixel size (RAW, JPG)
 * or unknown formats, after printing a diagnostic.
 *
 * Fixed: removed the unreachable `return -1.0f` that followed the switch
 * (every case, including default, already returns).
 */
float
format_bytes_per_pixel(frame_format_t f)
{
	switch (f) {
	case FORMAT_Y_UINT8: return 1.0f;
	case FORMAT_YUV420_UINT8: return 1.5f;
	case FORMAT_Y_UINT16:
	case FORMAT_YUV422_UINT8:
	case FORMAT_YUYV_UINT8: return 2.0f;
	case FORMAT_BGR_UINT8:
	case FORMAT_RGB_UINT8:
	case FORMAT_YUV444_UINT8: return 3.0f;
	case FORMAT_RAW:
	case FORMAT_JPG:
	default:
		printf("cannot compute format bytes per pixel\n");
		return -1.0f;
	}
}
/*
 * Compute the total size in bytes of a frame's pixel data.
 *
 * Uses stride * height for fixed-size formats, and a YUV444-sized upper
 * bound (width * height * 3) for JPG. Returns -1 for NULL frames and for
 * formats whose size cannot be computed (RAW, NONE, unknown).
 *
 * Fixed: the FORMAT_JPG case was missing its `break`, so a successfully
 * computed JPG size fell through into the error branch and printed
 * "cannot compute frame size" even though a valid size was returned.
 */
int32_t
frame_size_in_bytes(frame_t* f)
{
	if (f) {
		int32_t frame_bytes = -1;
		// TODO: alpha formats, padding etc.
		switch (f->format) {
		case FORMAT_Y_UINT8:
		case FORMAT_YUV420_UINT8:
		case FORMAT_Y_UINT16:
		case FORMAT_YUV422_UINT8:
		case FORMAT_BGR_UINT8:
		case FORMAT_RGB_UINT8:
		case FORMAT_YUV444_UINT8:
		case FORMAT_YUYV_UINT8:
			frame_bytes = f->stride * f->height;
			break;
		case FORMAT_JPG:
			// this is a maximum (assuming YUV444)
			frame_bytes = f->width * f->height * 3;
			break;
		case FORMAT_RAW:
		case FORMAT_NONE:
		default: printf("cannot compute frame size for this format\n");
		}
		return frame_bytes;
	}
	return -1;
}
/*
 * Per-frame bytes-per-pixel query. Placeholder: not yet implemented,
 * always reports failure with -1.
 */
int32_t
frame_bytes_per_pixel(frame_t* f)
{
	(void)f; // Unused until this is implemented.
	printf("ERROR: Not implemented\n");
	return -1;
}
/*
 * Split a side-by-side stereo frame into left and right halves.
 * Placeholder: not yet implemented, always reports failure.
 */
bool
frame_split_stereo(frame_t* source, frame_t* left, frame_t* right)
{
	(void)source; // Unused until this is implemented.
	(void)left;
	(void)right;
	printf("ERROR: Not implemented!\n");
	return false;
}
/*
 * Extract a single plane from a source frame into out.
 *
 * Only Y-plane extraction is implemented, for YUYV and YUV444 sources
 * (both store Y as the first byte of each pixel). Allocates out->data
 * when it is NULL; the caller owns and must free that allocation.
 * Returns true on success.
 *
 * Fixed: the old guard was
 *     if (source->format != FORMAT_YUYV_UINT8 && plane != PLANE_Y)
 * so a request for, say, the U plane of a YUYV frame slipped past the
 * check and silently produced Y data. The plane is now validated on its
 * own, and unsupported source formats are rejected before any allocation.
 */
bool
frame_extract_plane(frame_t* source, plane_t plane, frame_t* out)
{
	// only handle splitting Y out of YUYV/YUV444 for now
	if (plane != PLANE_Y) {
		printf("ERROR: unhandled plane extraction\n");
		return false;
	}
	if (!source->data) {
		printf("ERROR: no frame data!\n");
		return false;
	}
	// Reject unsupported formats before allocating anything.
	switch (source->format) {
	case FORMAT_YUYV_UINT8:
	case FORMAT_YUV444_UINT8: break;
	default: return false;
	}
	uint8_t source_pixel_bytes = format_bytes_per_pixel(source->format);
	uint32_t source_line_bytes = source->stride;
	uint8_t dest_pixel_bytes = format_bytes_per_pixel(out->format);
	// NOTE(review): destination stride uses out->width, not out->stride —
	// assumes a tightly-packed one-byte-per-pixel destination; confirm
	// against callers.
	uint32_t dest_line_bytes = out->width;
	if (!out->data) {
		printf(
		    "allocating data for NULL plane - someone needs to free "
		    "this!\n");
		int32_t size = frame_size_in_bytes(out);
		if (size <= 0) {
			// Unknown destination format: do not call malloc(-1).
			return false;
		}
		out->data = malloc((size_t)size);
		if (out->data == NULL) {
			return false;
		}
	}
	for (uint32_t i = 0; i < source->height; i++) {
		for (uint32_t j = 0; j < source->width; j++) {
			uint8_t* source_ptr = source->data +
			                      (j * source_pixel_bytes) +
			                      (i * source_line_bytes);
			uint8_t* dest_ptr = out->data +
			                    (j * dest_pixel_bytes) +
			                    (i * dest_line_bytes);
			*dest_ptr = *source_ptr; // Y is the first byte.
		}
	}
	return true;
}
/*
 * Resample/convert a frame. Currently only YUYV -> YUV444 is implemented:
 * each output pixel keeps its own Y; U/V are duplicated from the
 * containing YUYV macropixel (U of an odd pixel is reused from the
 * previous even pixel). Allocates out->data when NULL; the caller owns
 * and must free that allocation. Returns true on success.
 *
 * Fixed: the guard used '&&', i.e. it only rejected the call when BOTH
 * the source was not YUYV AND the destination was not YUV444 — so e.g. a
 * YUYV source with an RGB destination slipped through and had 3-byte YUV
 * data written into it. Both conditions must hold, hence '||'.
 * Also removed the dead `break` after `return true` and the now-redundant
 * switch (the guard already pins the source format).
 */
bool
frame_resample(frame_t* source, frame_t* out)
{
	// TODO: more complete resampling.
	if (source->format != FORMAT_YUYV_UINT8 ||
	    out->format != FORMAT_YUV444_UINT8) {
		printf("ERROR: unhandled resample operation\n");
		return false;
	}
	if (!source->data) {
		printf("ERROR: no frame data!\n");
		return false;
	}
	uint8_t source_pixel_bytes = format_bytes_per_pixel(source->format);
	uint32_t source_line_bytes = source->stride;
	uint8_t dest_pixel_bytes = format_bytes_per_pixel(out->format);
	uint32_t dest_line_bytes = out->stride;
	if (!out->data) {
		printf(
		    "allocating data for NULL plane - someone needs to free "
		    "this!\n");
		int32_t size = frame_size_in_bytes(out);
		if (size <= 0) {
			// Unknown destination size: do not call malloc(-1).
			return false;
		}
		out->data = (uint8_t*)malloc((size_t)size);
		if (out->data == NULL) {
			return false;
		}
	}
	uint8_t lastU = 0;
	for (uint32_t i = 0; i < source->height; i++) {
		for (uint32_t j = 0; j < source->width; j++) {
			uint8_t* source_ptr = source->data +
			                      (j * source_pixel_bytes) +
			                      (i * source_line_bytes);
			uint8_t* dest_ptr = out->data +
			                    (j * dest_pixel_bytes) +
			                    (i * dest_line_bytes);
			*dest_ptr = *source_ptr; // Y
			if (j % 2 == 0) {
				// Even pixel of the YUYV pair: U at +1,
				// V at +3 (shared with the next pixel).
				*(dest_ptr + 1) = *(source_ptr + 1);
				*(dest_ptr + 2) = *(source_ptr + 3);
				lastU = *(dest_ptr + 1);
			} else {
				// Odd pixel: reuse the pair's U; V at +1.
				*(dest_ptr + 1) = lastU;
				*(dest_ptr + 2) = *(source_ptr + 1);
			}
		}
	}
	return true;
}
/*
 * Allocate a frameserver instance and construct the backend selected by t.
 *
 * Returns NULL on allocation failure, on an unknown/compiled-out type, or
 * when the backend constructor fails; in every failure path the instance
 * allocated here is freed. On success the caller owns the returned pointer.
 */
frameserver_instance_t*
frameserver_create(frameserver_type_t t)
{
	frameserver_instance_t* i = U_TYPED_CALLOC(frameserver_instance_t);
	frameserver_internal_instance_ptr internal = NULL;
	if (i == NULL) {
		return NULL;
	}
	/*
	 * Each implementation constructor should set up the members of the
	 * frameserver instance, as well as return a pointer to itself. If it
	 * fails, it should return NULL without de-allocating the frameserver
	 * instance: that is the responsibility of this function.
	 */
	switch (t) {
#ifdef XRT_HAVE_FFMPEG
	case FRAMESERVER_TYPE_FFMPEG:
		internal = (frameserver_internal_instance_ptr)
		    ffmpeg_frameserver_create(i);
		break;
#endif // XRT_HAVE_FFMPEG
#ifdef XRT_HAVE_LIBUVC
	case FRAMESERVER_TYPE_UVC:
		internal =
		    (frameserver_internal_instance_ptr)uvc_frameserver_create(
		        i);
		break;
#endif // XRT_HAVE_LIBUVC
	// The v4l2 backend is always compiled in; its #include above is
	// likewise unconditional, so this case needs no guard.
	case FRAMESERVER_TYPE_V4L2:
		internal =
		    (frameserver_internal_instance_ptr)v4l2_frameserver_create(
		        i);
		break;
	case FRAMESERVER_TYPE_NONE:
	default:
		free(i);
		return NULL;
		break;
	}
	if (internal == NULL) {
		/* Failed to allocate/create internal implementation */
		free(i);
		return NULL;
	}
	return i;
}
/*
 * Smoke-test entry point for the frameserver backends.
 *
 * Fixed: ffmpeg_frameserver_test() was called unconditionally even though
 * its declaration (and implementation) only exist when XRT_HAVE_FFMPEG is
 * defined — the same guard used for the #include above — which broke
 * builds without the ffmpeg backend. The call is now guarded identically.
 */
bool
frameservers_test()
{
#ifdef XRT_HAVE_FFMPEG
	ffmpeg_frameserver_test();
#endif // XRT_HAVE_FFMPEG
	// uvc_frameserver_test();
	// v4l2_frameserver_test();
	return true;
}
#ifndef FRAMESERVER_H
#define FRAMESERVER_H
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <math/m_api.h>
#include <mt_events.h>
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_PLANES 3 // this is what we see currently in e.g. RGB,YUV
// frame