...
 
Commits (44)
......@@ -7,8 +7,8 @@ variables:
DEBIAN_DEBS: 'build-essential git cmake meson ninja-build doxygen graphviz libeigen3-dev curl patch python3 pkg-config libx11-dev libxxf86vm-dev libxrandr-dev libxcb-randr0-dev libhidapi-dev libwayland-dev libvulkan-dev glslang-dev glslang-tools libglvnd-dev libgl1-mesa-dev ca-certificates libusb-1.0-0-dev libuvc-dev libavcodec-dev libopencv-dev libudev-dev clang-format-7 codespell'
DEBIAN_EXEC: 'bash .gitlab-ci/build-openxr-openhmd.sh'
ARCH_TAG: '2019-04-18.0'
ARCH_PKGS: 'git gcc cmake meson ninja pkgconfig python3 diffutils patch doxygen graphviz eigen hidapi libxrandr mesa glslang vulkan-headers vulkan-icd-loader check glfw-x11 libusb opencv'
ARCH_TAG: '2019-06-19.0'
ARCH_PKGS: 'git gcc cmake meson ninja pkgconfig python3 diffutils patch doxygen graphviz eigen hidapi libxrandr mesa glslang vulkan-headers vulkan-icd-loader check glfw-x11 libusb opencv gtk3 ffmpeg'
ARCH_EXEC: 'bash .gitlab-ci/build-openxr-openhmd.sh'
ARCH_CONTAINER_IMAGE: $CI_REGISTRY_IMAGE/archlinux/rolling:$ARCH_TAG
......
#!/bin/sh
# Install the OpenXR SDK, whatever version, with @haagch's patch so we can
# actually install it system-wide.
# Install the OpenXR SDK, whatever version, installed system-wide.
git clone https://github.com/KhronosGroup/OpenXR-SDK
pushd OpenXR-SDK
curl 'https://aur.archlinux.org/cgit/aur.git/plain/support_installing_the_loader.diff?h=openxr-loader-git' | patch -p1
mkdir build
pushd build
cmake -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_INSTALL_LIBDIR=lib -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=Off -DPRESENTATION_BACKEND=xlib -DDYNAMIC_LOADER=ON -DOpenGL_GL_PREFERENCE=GLVND -GNinja ..
......
......@@ -25,10 +25,9 @@ find_package(Vulkan REQUIRED)
find_package(OpenGL REQUIRED COMPONENTS GLX)
find_package(HIDAPI)
find_package(OpenHMD)
find_package(OpenCV)
# @TODO Turn into a find_package LIBUSB-1.0 file.
pkg_check_modules(LIBUSB REQUIRED libusb-1.0)
find_package(OpenCV COMPONENTS core calib3d highgui imgproc imgcodecs features2d video)
find_package(Libusb1)
find_package(JPEG)
# @TODO Turn into a find_package LIBUVC file.
pkg_check_modules(LIBUVC libuvc)
......@@ -36,6 +35,7 @@ pkg_check_modules(LIBUVC libuvc)
# @TODO Turn into a find_package FFMPEG file.
pkg_check_modules(FFMPEG libavcodec)
find_package(uvbi)
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
# Compositor backend
......@@ -52,6 +52,8 @@ cmake_dependent_option(BUILD_WITH_XLIB "Enable xlib support" ON "X11_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_OPENGL "Enable OpenGL Graphics API support" ON "OPENGL_FOUND" OFF)
set(BUILD_WITH_LIBUSB TRUE)
cmake_dependent_option(BUILD_WITH_JPEG "Enable jpeg code (used for some video drivers)" ON "JPEG_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_UVBI "Enable UVBI-based optical tracking driver" ON "LIBUVC_FOUND AND uvbi_FOUND AND OPENCV_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_OPENCV "Enable OpenCV backend" ON "OpenCV_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_LIBUVC "Enable libuvc video driver" ON "LIBUVC_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_FFMPEG "Enable ffmpeg testing video driver" ON "FFMPEG_FOUND" OFF)
......@@ -88,6 +90,10 @@ if(BUILD_WITH_OPENCV)
add_definitions(-DXRT_HAVE_OPENCV)
endif()
if(BUILD_WITH_JPEG)
add_definitions(-DXRT_HAVE_JPEG)
endif()
if(BUILD_WITH_LIBUVC)
add_definitions(-DXRT_HAVE_LIBUVC)
endif()
......@@ -120,6 +126,17 @@ if(TRUE)
set(BUILD_DRIVER_PSMV TRUE)
endif()
if(BUILD_WITH_UVBI)
add_definitions(-DXRT_HAVE_UVBI)
endif()
if(BUILD_WITH_OPENCV AND (BUILD_WITH_FFMPEG OR BUILD_WITH_JPEG))
# Condition for enabling the montrack optical tracking driver.
# JPEG required for both UVC and v4l2 backends.
add_definitions(-DXRT_BUILD_MONTRACK)
set(BUILD_DRIVER_MONTRACK TRUE)
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic -Wall -Wextra -Wno-unused-parameter")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wno-unused-parameter")
......
# - try to find libusb-1 library
#
# Cache Variables: (probably not for direct use in your scripts)
# LIBUSB1_LIBRARY
# LIBUSB1_INCLUDE_DIR
#
# Non-cache variables you should use in your CMakeLists.txt:
# LIBUSB1_LIBRARIES
# LIBUSB1_INCLUDE_DIRS
# LIBUSB1_FOUND - if this is not true, do not attempt to use this library
#
# Requires these CMake modules:
# ProgramFilesGlob
# FindPackageHandleStandardArgs (known included with CMake >=2.6.2)
#
# Original Author:
# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
# http://academic.cleardefinition.com
# Iowa State University HCI Graduate Program/VRAC
#
# Copyright Iowa State University 2009-2010.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# User-overridable root to search for libusb-1 (cache, so it survives re-configure).
set(LIBUSB1_ROOT_DIR
	"${LIBUSB1_ROOT_DIR}"
	CACHE
	PATH
	"Root directory to search for libusb-1")

if(WIN32)
	# On Windows, look for the LibUSB-Win32 binary distribution under
	# Program Files, with per-compiler/per-arch library subdirectories.
	include(ProgramFilesGlob)
	program_files_fallback_glob(_dirs "LibUSB-Win32")
	if(CMAKE_SIZEOF_VOID_P EQUAL 8)
		if(MSVC)
			set(_lib_suffixes lib/msvc_x64 MS64/static)
		endif()
	else()
		if(MSVC)
			set(_lib_suffixes lib/msvc MS32/static)
		elseif(CMAKE_COMPILER_IS_GNUCXX)
			# Fixed: was COMPILER_IS_GNUCXX (undefined variable);
			# the standard CMake variable is CMAKE_COMPILER_IS_GNUCXX,
			# so the MinGW suffix was never applied.
			set(_lib_suffixes lib/gcc)
		endif()
	endif()
else()
	# Elsewhere, let pkg-config (if present) seed the search paths.
	set(_lib_suffixes)
	find_package(PkgConfig QUIET)
	if(PKG_CONFIG_FOUND)
		pkg_check_modules(PC_LIBUSB1 libusb-1.0)
	endif()
endif()
# Locate the libusb-1.0 header; pkg-config results (if any) and the
# LibUSB-Win32 glob dirs are searched in addition to the user root.
find_path(LIBUSB1_INCLUDE_DIR
	NAMES
	libusb.h
	PATHS
	${PC_LIBUSB1_INCLUDE_DIRS}
	${PC_LIBUSB1_INCLUDEDIR}
	${_dirs}
	HINTS
	"${LIBUSB1_ROOT_DIR}"
	PATH_SUFFIXES
	include/libusb-1.0
	include
	libusb-1.0)

# Locate the library itself; _lib_suffixes is only non-empty on Windows.
find_library(LIBUSB1_LIBRARY
	NAMES
	libusb-1.0
	usb-1.0
	PATHS
	${PC_LIBUSB1_LIBRARY_DIRS}
	${PC_LIBUSB1_LIBDIR}
	${_dirs}
	HINTS
	"${LIBUSB1_ROOT_DIR}"
	PATH_SUFFIXES
	${_lib_suffixes})

include(FindPackageHandleStandardArgs)
# NOTE(review): package name here is "Libusb1"; FPHSA historically sets the
# upper-cased LIBUSB1_FOUND tested below — confirm on the project's minimum
# CMake version.
find_package_handle_standard_args(Libusb1
	DEFAULT_MSG
	LIBUSB1_LIBRARY
	LIBUSB1_INCLUDE_DIR)

# Plural result variables for consumers, per find-module convention.
if(LIBUSB1_FOUND)
	set(LIBUSB1_LIBRARIES "${LIBUSB1_LIBRARY}")
	set(LIBUSB1_INCLUDE_DIRS "${LIBUSB1_INCLUDE_DIR}")
	mark_as_advanced(LIBUSB1_ROOT_DIR)
endif()

mark_as_advanced(LIBUSB1_INCLUDE_DIR LIBUSB1_LIBRARY)
......@@ -7,6 +7,9 @@ include_directories(
${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
)
if(BUILD_DRIVER_MONTRACK)
add_subdirectory(montrack)
endif()
if(BUILD_DRIVER_HDK)
set(HDK_SOURCE_FILES
......
# Sources for the montrack optical-tracking driver proper; the frameserver,
# filter and optical_tracking pieces are built as separate object libraries
# in their own subdirectories below.
set (MONTRACK_SOURCE_FILES
	mt_device.c
	mt_device.h
	mt_interface.h
	mt_prober.c
	mt_events.h
	mt_framequeue.h
	mt_framequeue.c
	)

add_subdirectory(frameservers)
add_subdirectory(filters)
add_subdirectory(optical_tracking)

# Use OBJECT to not create a archive, since it just gets in the way.
# NOTE(review): the sub-libraries' objects are absorbed here via
# $<TARGET_OBJECTS:...> AND the same targets are named again in
# target_link_libraries below — confirm that is intentional and does not
# duplicate objects; also, linking OBJECT libraries requires CMake >= 3.12.
add_library(drv_montrack OBJECT ${MONTRACK_SOURCE_FILES}
	$<TARGET_OBJECTS:frameserver>
	$<TARGET_OBJECTS:filter>
	$<TARGET_OBJECTS:optical_tracking>
	)
set_property(TARGET drv_montrack PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(drv_montrack SYSTEM
	PRIVATE frameservers
	PRIVATE optical_tracking
	PRIVATE filters)
target_link_libraries (drv_montrack frameserver filter optical_tracking)
# Copyright 2019, Collabora, Ltd.
# SPDX-License-Identifier: BSL-1.0

# Build the filter object library (Kalman filtering for the montrack driver).
include_directories(
	${CMAKE_CURRENT_SOURCE_DIR}/../include
	${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
	${CMAKE_CURRENT_SOURCE_DIR}
	)

set(FILTER_SOURCE_FILES
	common/filter.h
	common/filter.c
	filter_opencv_kalman.cpp
	filter_opencv_kalman.h
	)

# Use OBJECT to not create a archive, since it just gets in the way.
add_library(filter OBJECT ${FILTER_SOURCE_FILES})
set_property(TARGET filter PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(filter SYSTEM
	# Fixed: was ${CMAKE_CURRENT_SOURCE_DIR}../ (missing path separator),
	# which produced a bogus ".../filters../" include directory. Now matches
	# the sibling frameservers/CMakeLists.txt convention.
	PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/..
	PRIVATE ${OpenCV_INCLUDE_DIRS}
	)
#include "filter.h"
#include "filter_opencv_kalman.h"
#include <string.h>
#include "util/u_misc.h"
/*!
 * Create a filter instance of the given type.
 *
 * Allocates the generic wrapper, wires up the type-specific function
 * pointers, and constructs the backend. Returns NULL on unknown type or
 * on any allocation/backend-construction failure (nothing is leaked).
 */
filter_instance_t*
filter_create(filter_type_t t)
{
	filter_instance_t* i = U_TYPED_CALLOC(filter_instance_t);
	if (i == NULL) {
		return NULL;
	}
	switch (t) {
	case FILTER_TYPE_OPENCV_KALMAN:
		i->tracker_type = t;
		i->filter_configure = filter_opencv_kalman_configure;
		i->filter_get_state = filter_opencv_kalman_get_state;
		i->filter_predict_state = filter_opencv_kalman_predict_state;
		i->filter_set_state = filter_opencv_kalman_set_state;
		i->filter_queue = filter_opencv_kalman_queue;
		i->internal_instance = (filter_internal_instance_ptr)
		    filter_opencv_kalman_create(i);
		// Fixed: previously a failed backend creation was ignored and
		// a half-initialized instance (NULL internal_instance) was
		// returned, crashing on first use.
		if (i->internal_instance == NULL) {
			free(i);
			return NULL;
		}
		return i;
	case FILTER_TYPE_NONE:
	default:
		free(i);
		return NULL;
	}
}
/*!
 * Smoke test: create (and tear down) an OpenCV Kalman filter instance.
 *
 * Returns false if creation fails.
 */
bool
filters_test()
{
	// create a filter
	filter_instance_t* filter = filter_create(FILTER_TYPE_OPENCV_KALMAN);
	if (!filter) {
		return false;
	}
	// Fixed: the instance was previously leaked by this test.
	filter_destroy(filter);
	return true;
}
#ifndef FILTER_H
#define FILTER_H

#include <xrt/xrt_defines.h>
#include <../auxiliary/util/u_time.h>
#include <../optical_tracking/common/tracker.h>

/* Fixed: these declarations are implemented in C (filter.c) but consumed
 * from C++ (filter_opencv_kalman.cpp via filter_opencv_kalman.h), so they
 * need C linkage — the sibling filter_opencv_kalman.h already has this
 * guard; this header was missing it. */
#ifdef __cplusplus
extern "C" {
#endif

/* Opaque handle types used across the filter interface. */
typedef void* filter_instance_ptr;
typedef void* filter_internal_instance_ptr;
typedef void* filter_configuration_ptr;
typedef void* filter_state_ptr;

/* A filtered pose estimate plus its derivatives at a point in time. */
typedef struct filter_state
{
	struct xrt_pose pose;
	bool has_position;
	bool has_rotation;
	struct xrt_vec3 velocity;
	struct xrt_vec3 acceleration;
	struct xrt_quat angular_velocity;
	struct xrt_quat angular_accel;
	timepoint_ns timestamp;
} filter_state_t;

typedef enum filter_type
{
	FILTER_TYPE_NONE,
	FILTER_TYPE_OPENCV_KALMAN
} filter_type_t;

/*!
 * Generic filter instance: a vtable of type-specific entry points plus an
 * opaque pointer to the backend's internal state.
 */
typedef struct _filter_instance
{
	filter_type_t tracker_type;
	bool (*filter_queue)(filter_instance_ptr inst,
	                     tracker_measurement_t* measurement);
	bool (*filter_set_state)(filter_instance_ptr inst,
	                         filter_state_ptr state);
	bool (*filter_get_state)(filter_instance_ptr inst,
	                         filter_state_ptr state);
	bool (*filter_predict_state)(filter_instance_ptr inst,
	                             filter_state_t* state,
	                             timepoint_ns time);
	bool (*filter_configure)(filter_instance_ptr inst,
	                         filter_configuration_ptr config);
	filter_internal_instance_ptr internal_instance;
} filter_instance_t;

/*! Create a filter of the given type; NULL on failure. */
filter_instance_t*
filter_create(filter_type_t t);

/*! Destroy an instance returned by filter_create(). */
bool
filter_destroy(filter_instance_t* inst);

/*! Self-test: create/destroy one filter. */
bool
filters_test(void);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // FILTER_H
#include <opencv2/opencv.hpp>

#include <new>

#include "filter_opencv_kalman.h"
#include "util/u_misc.h"
/*!
 * Internal state of the OpenCV Kalman filter backend.
 */
struct filter_opencv_kalman_instance_t
{
	bool configured; //!< set once filter_opencv_kalman_configure has run
	opencv_filter_configuration_t configuration;
	cv::KalmanFilter kalman_filter;
	cv::Mat observation; //!< 3x1 measurement vector (position only)
	cv::Mat prediction;  //!< 6x1 predicted state (position + velocity)
	cv::Mat state;
	bool running; //!< true once the first measurement has been queued
};
/*!
 * Cast the generic opaque internal-instance pointer to the opencv_kalman
 * implementation type.
 */
static inline filter_opencv_kalman_instance_t*
filter_opencv_kalman_instance(filter_internal_instance_ptr ptr)
{
	return static_cast<filter_opencv_kalman_instance_t*>(ptr);
}
/*!
 * Destroy hook for the OpenCV Kalman backend.
 *
 * NOTE(review): intentionally a no-op and always returns false; it does not
 * free inst->internal_instance, so teardown currently leaks the backend —
 * confirm ownership against filter_destroy().
 */
bool
filter_opencv_kalman__destroy(filter_instance_t* inst)
{
	// do nothing
	return false;
}
/*!
 * Feed one position measurement into the Kalman filter (correction step)
 * and mark the filter as running.
 *
 * Returns true on success.
 */
bool
filter_opencv_kalman_queue(filter_instance_t* inst,
                           tracker_measurement_t* measurement)
{
	filter_opencv_kalman_instance_t* internal =
	    filter_opencv_kalman_instance(inst->internal_instance);
	printf("queueing measurement in filter\n");
	internal->observation.at<float>(0, 0) = measurement->pose.position.x;
	internal->observation.at<float>(1, 0) = measurement->pose.position.y;
	internal->observation.at<float>(2, 0) = measurement->pose.position.z;
	internal->kalman_filter.correct(internal->observation);
	internal->running = true;
	// Fixed: returned false even though the measurement was successfully
	// queued, inconsistent with predict_state's true-on-success contract.
	return true;
}
/*!
 * @todo Not implemented; always returns false.
 */
bool
filter_opencv_kalman_get_state(filter_instance_t* inst, filter_state_t* state)
{
	return false;
}
/*!
 * @todo Not implemented; always returns false.
 */
bool
filter_opencv_kalman_set_state(filter_instance_t* inst, filter_state_t* state)
{
	return false;
}
/*!
 * Run the Kalman prediction step and write the predicted position into
 * @p state. Fails (returns false) until at least one measurement has been
 * queued. The @p time argument is currently unused.
 */
bool
filter_opencv_kalman_predict_state(filter_instance_t* inst,
                                   filter_state_t* state,
                                   timepoint_ns time)
{
	filter_opencv_kalman_instance_t* self =
	    filter_opencv_kalman_instance(inst->internal_instance);
	if (!self->running) {
		return false;
	}

	self->prediction = self->kalman_filter.predict();

	// Only position is produced; rotation is left untouched.
	state->has_position = true;
	state->pose.position.x = self->prediction.at<float>(0, 0);
	state->pose.position.y = self->prediction.at<float>(1, 0);
	state->pose.position.z = self->prediction.at<float>(2, 0);
	return true;
}
/*!
 * Apply an opencv_filter_configuration_t to the backend: store it and push
 * the two noise covariances into the OpenCV filter as scaled identities.
 */
bool
filter_opencv_kalman_configure(filter_instance_t* inst,
                               filter_configuration_ptr config_generic)
{
	filter_opencv_kalman_instance_t* self =
	    filter_opencv_kalman_instance(inst->internal_instance);
	opencv_filter_configuration_t* cfg =
	    (opencv_filter_configuration_t*)config_generic;

	self->configuration = *cfg;
	cv::setIdentity(self->kalman_filter.processNoiseCov,
	                cv::Scalar::all(cfg->process_noise_cov));
	cv::setIdentity(self->kalman_filter.measurementNoiseCov,
	                cv::Scalar::all(cfg->measurement_noise_cov));
	self->configured = true;
	return true;
}
/*!
 * Construct the OpenCV Kalman backend: 6-state (position + velocity),
 * 3-measurement (position) constant-velocity model.
 *
 * Returns NULL on allocation failure.
 */
filter_opencv_kalman_instance_t*
filter_opencv_kalman_create(filter_instance_t* inst)
{
	// Fixed: this struct has non-trivial C++ members (cv::KalmanFilter,
	// cv::Mat) and was previously allocated with U_TYPED_CALLOC; calling
	// methods on a calloc'd, never-constructed C++ object is undefined
	// behavior. Value-initializing `new` zeroes the POD members (matching
	// the old calloc semantics) and runs the cv constructors.
	// NOTE(review): the matching teardown must use delete, not free() —
	// confirm filter_opencv_kalman__destroy / filter_destroy.
	filter_opencv_kalman_instance_t* i =
	    new (std::nothrow) filter_opencv_kalman_instance_t();
	if (i == NULL) {
		return NULL;
	}

	float dt = 1.0;
	i->kalman_filter.init(6, 3);
	i->observation = cv::Mat(3, 1, CV_32F);
	i->prediction = cv::Mat(6, 1, CV_32F);
	// Constant-velocity transition: position += velocity * dt.
	i->kalman_filter.transitionMatrix =
	    (cv::Mat_<float>(6, 6) << 1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0,
	     1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0,
	     0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
	     0.0, 0.0, 0.0, 0.0, 1.0);
	cv::setIdentity(i->kalman_filter.measurementMatrix,
	                cv::Scalar::all(1.0f));
	cv::setIdentity(i->kalman_filter.errorCovPost, cv::Scalar::all(0.0f));

	// our filter parameters set the process and measurement noise
	// covariances (zero here; real values arrive via _configure()).
	cv::setIdentity(i->kalman_filter.processNoiseCov,
	                cv::Scalar::all(i->configuration.process_noise_cov));
	cv::setIdentity(
	    i->kalman_filter.measurementNoiseCov,
	    cv::Scalar::all(i->configuration.measurement_noise_cov));

	i->configured = false;
	i->running = false;
	return i;
}
#ifndef FILTER_OPENCV_KALMAN_H
#define FILTER_OPENCV_KALMAN_H

#include <xrt/xrt_defines.h>
#include "common/filter.h"

/*! Tunable noise covariances for the OpenCV Kalman backend. */
typedef struct opencv_filter_configuration
{
	float measurement_noise_cov;
	float process_noise_cov;
} opencv_filter_configuration_t;

typedef struct opencv_kalman_filter_state
{
	struct xrt_pose pose;
} opencv_kalman_filter_state_t;

#ifdef __cplusplus
extern "C" {
#endif

// forward declare this, as it contains C++ stuff
typedef struct filter_opencv_kalman_instance_t filter_opencv_kalman_instance_t;

/*! Construct the backend; NULL on failure. */
filter_opencv_kalman_instance_t*
filter_opencv_kalman_create(filter_instance_t* inst);

/*! Destroy hook (currently a no-op — see implementation). */
bool
filter_opencv_kalman__destroy(filter_instance_t* inst);

/*! Feed one measurement (correction step). */
bool
filter_opencv_kalman_queue(filter_instance_t* inst,
                           tracker_measurement_t* measurement);

bool
filter_opencv_kalman_get_state(filter_instance_t* inst, filter_state_t* state);

bool
filter_opencv_kalman_set_state(filter_instance_t* inst, filter_state_t* state);

/*! Run the prediction step, writing position into the state. */
bool
filter_opencv_kalman_predict_state(filter_instance_t* inst,
                                   filter_state_t*,
                                   timepoint_ns time);

/*! Apply an opencv_filter_configuration_t. */
bool
filter_opencv_kalman_configure(filter_instance_t* inst,
                               filter_configuration_ptr config_generic);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // FILTER_OPENCV_KALMAN_H
# Copyright 2019, Collabora, Ltd.
# SPDX-License-Identifier: BSL-1.0

# Build the frameserver object library (video sources for montrack).
include_directories(
	${CMAKE_CURRENT_SOURCE_DIR}/../include
	${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
	${CMAKE_CURRENT_SOURCE_DIR}
	)

set(FRAMESERVER_SOURCE_FILES
	common/frameserver.c
	common/frameserver.h
	)

if(BUILD_WITH_FFMPEG)
	list(APPEND FRAMESERVER_SOURCE_FILES
		ffmpeg/ffmpeg_frameserver.c
		ffmpeg/ffmpeg_frameserver.h
		)
endif()

# The UVC backend needs JPEG for decompression.
if(BUILD_WITH_LIBUVC AND BUILD_WITH_JPEG)
	list(APPEND FRAMESERVER_SOURCE_FILES
		uvc/uvc_frameserver.c
		uvc/uvc_frameserver.h
		)
endif()

if(BUILD_WITH_JPEG)
	list(APPEND FRAMESERVER_SOURCE_FILES
		v4l2/v4l2_frameserver.c
		v4l2/v4l2_frameserver.h
		)
endif()

# Use OBJECT to not create a archive, since it just gets in the way.
add_library(frameserver OBJECT ${FRAMESERVER_SOURCE_FILES})
set_property(TARGET frameserver PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(frameserver SYSTEM
	PRIVATE
	${CMAKE_CURRENT_SOURCE_DIR}/..
	)

if(BUILD_WITH_LIBUVC AND BUILD_WITH_JPEG)
	target_include_directories(frameserver SYSTEM
		PRIVATE
		# Fixed: was ${libuvc_INCLUDE_DIRS}, which is never set —
		# the top-level CMakeLists calls pkg_check_modules(LIBUVC
		# libuvc), so the variable prefix is LIBUVC.
		${LIBUVC_INCLUDE_DIRS}
		${LIBUSB1_INCLUDE_DIRS}
		)
endif()

if(BUILD_WITH_JPEG)
	target_include_directories(frameserver SYSTEM
		PRIVATE
		${JPEG_INCLUDE_DIRS}
		)
endif()
#include "frameserver.h"
#ifdef XRT_HAVE_FFMPEG
#include "ffmpeg/ffmpeg_frameserver.h"
#endif // XRT_HAVE_FFMPEG
#ifdef XRT_HAVE_LIBUVC
#include "uvc/uvc_frameserver.h"
#endif // XRT_HAVE_LIBUVC
#include "v4l2/v4l2_frameserver.h"
#include "util/u_misc.h"
/*
 * Bytes per pixel for a given frame format. Returned as float because
 * planar formats average a fractional number of bytes per pixel
 * (e.g. YUV420 is 1.5).
 */
float
format_bytes_per_pixel(frame_format_t f)
{
	switch (f) {
	case FORMAT_Y_UINT8:
		return 1.0f;
	case FORMAT_YUV420_UINT8:
		return 1.5f;
	case FORMAT_Y_UINT16:
	case FORMAT_YUV422_UINT8:
	case FORMAT_YUYV_UINT8:
		return 2.0f;
	case FORMAT_BGR_UINT8:
	case FORMAT_RGB_UINT8:
	case FORMAT_YUV444_UINT8:
		return 3.0f;
	case FORMAT_RAW:
	case FORMAT_JPG:
	default:
		break;
	}
	// Compressed, raw and unknown formats have no fixed pixel size.
	printf("cannot compute format bytes per pixel\n");
	return -1.0f;
}
/*
 * Total buffer size in bytes needed to hold the frame's pixel data,
 * or -1 if it cannot be computed for this format (or f is NULL).
 */
int32_t
frame_size_in_bytes(frame_t* f)
{
	if (f == NULL) {
		return -1;
	}
	int32_t frame_bytes = -1;
	// TODO: alpha formats, padding etc.
	switch (f->format) {
	case FORMAT_Y_UINT8:
	case FORMAT_YUV420_UINT8:
	case FORMAT_Y_UINT16:
	case FORMAT_YUV422_UINT8:
	case FORMAT_BGR_UINT8:
	case FORMAT_RGB_UINT8:
	case FORMAT_YUV444_UINT8:
	case FORMAT_YUYV_UINT8:
		frame_bytes = f->stride * f->height;
		break;
	case FORMAT_JPG:
		// this is a maximum (assuming YUV444)
		frame_bytes = f->width * f->height * 3;
		// Fixed: missing break meant this case fell through into the
		// error path and printed a spurious error for every JPG frame.
		break;
	case FORMAT_RAW:
	case FORMAT_NONE:
	default: printf("cannot compute frame size for this format\n");
	}
	return frame_bytes;
}
/*
 * @todo Not implemented; always prints an error and returns -1.
 * Use format_bytes_per_pixel() instead.
 */
int32_t
frame_bytes_per_pixel(frame_t* f)
{
	printf("ERROR: Not implemented\n");
	return -1;
}
/*
 * @todo Not implemented; always prints an error and returns false.
 */
bool
frame_split_stereo(frame_t* source, frame_t* left, frame_t* right)
{
	printf("ERROR: Not implemented!\n");
	return false;
}
/*
 * Extract a single plane from a frame into @p out. Only extracting the Y
 * plane from YUYV or YUV444 sources is supported. Allocates out->data if
 * it is NULL (caller must free it).
 */
bool
frame_extract_plane(frame_t* source, plane_t plane, frame_t* out)
{
	// only handle splitting Y out of YUYV (or YUV444) for now.
	// Fixed: the guard used '&&', which only rejected the request when
	// BOTH the format and the plane were unsupported — e.g. asking for
	// PLANE_U of a YUYV frame slipped through and silently produced
	// Y data.
	bool supported_format = source->format == FORMAT_YUYV_UINT8 ||
	                        source->format == FORMAT_YUV444_UINT8;
	if (!supported_format || plane != PLANE_Y) {
		printf("ERROR: unhandled plane extraction\n");
		return false;
	}
	if (!source->data) {
		printf("ERROR: no frame data!\n");
		return false;
	}

	uint8_t* source_ptr;
	uint8_t* dest_ptr;
	uint8_t source_pixel_bytes = format_bytes_per_pixel(source->format);
	uint32_t source_line_bytes = source->stride;
	uint8_t dest_pixel_bytes = format_bytes_per_pixel(out->format);
	uint32_t dest_line_bytes = out->width;

	if (!out->data) {
		printf(
		    "allocating data for NULL plane - someone needs to free "
		    "this!\n");
		int32_t bytes = frame_size_in_bytes(out);
		if (bytes <= 0) {
			return false;
		}
		out->data = malloc(bytes);
		// Fixed: malloc result was previously not checked.
		if (!out->data) {
			return false;
		}
	}

	switch (source->format) {
	case FORMAT_YUYV_UINT8:
	case FORMAT_YUV444_UINT8:
		// Y is the first byte of every pixel in both layouts, so copy
		// one byte per pixel.
		for (uint32_t i = 0; i < source->height; i++) {
			for (uint32_t j = 0; j < source->width; j++) {
				source_ptr = source->data +
				             (j * source_pixel_bytes) +
				             (i * source_line_bytes);
				dest_ptr = out->data + (j * dest_pixel_bytes) +
				           (i * dest_line_bytes);
				*dest_ptr = *source_ptr;
			}
		}
		break;
	default: return false;
	}
	return true;
}
/*
 * Resample a frame into another format. Only YUYV -> YUV444 upsampling is
 * implemented. Allocates out->data if it is NULL (caller must free it).
 */
bool
frame_resample(frame_t* source, frame_t* out)
{
	// TODO: more complete resampling.
	// Fixed: the guard used '&&', so the "unhandled" error only fired
	// when BOTH formats were wrong; e.g. YUYV -> RGB fell through into
	// the YUYV case and wrote YUV data into an RGB-sized buffer.
	if (source->format != FORMAT_YUYV_UINT8 ||
	    out->format != FORMAT_YUV444_UINT8) {
		printf("ERROR: unhandled resample operation\n");
		return false;
	}
	if (!source->data) {
		printf("ERROR: no frame data!\n");
		return false;
	}

	uint8_t* source_ptr;
	uint8_t* dest_ptr;
	uint8_t source_pixel_bytes = format_bytes_per_pixel(source->format);
	uint32_t source_line_bytes = source->stride;
	uint8_t dest_pixel_bytes = format_bytes_per_pixel(out->format);
	uint32_t dest_line_bytes = out->stride;

	if (!out->data) {
		printf(
		    "allocating data for NULL plane - someone needs to free "
		    "this!\n");
		int32_t bytes = frame_size_in_bytes(out);
		if (bytes <= 0) {
			return false;
		}
		out->data = (uint8_t*)malloc(bytes);
		// Fixed: malloc result was previously not checked.
		if (!out->data) {
			return false;
		}
	}

	// YUYV packs U and V for each horizontal pixel pair; replicate the
	// chroma of each pair across both output pixels.
	uint8_t lastU = 0;
	for (uint32_t i = 0; i < source->height; i++) {
		for (uint32_t j = 0; j < source->width; j++) {
			source_ptr = source->data + (j * source_pixel_bytes) +
			             (i * source_line_bytes);
			dest_ptr = out->data + (j * dest_pixel_bytes) +
			           (i * dest_line_bytes);
			*dest_ptr = *source_ptr; // Y
			if (j % 2 == 0) {
				*(dest_ptr + 1) = *(source_ptr + 1); // U
				*(dest_ptr + 2) =
				    *(source_ptr +
				      3); // V from next source pixel
				lastU = *(dest_ptr + 1);
			} else {
				*(dest_ptr + 1) = lastU;
				*(dest_ptr + 2) = *(source_ptr + 1);
			}
		}
	}
	return true;
}
/*
 * Create a frameserver instance of the given type.
 *
 * Returns NULL for an unknown/compiled-out type, or if the backend's
 * constructor fails; the wrapper allocation is freed in both cases.
 */
frameserver_instance_t*
frameserver_create(frameserver_type_t t)
{
	frameserver_instance_t* i = U_TYPED_CALLOC(frameserver_instance_t);
	frameserver_internal_instance_ptr internal = NULL;
	if (i == NULL) {
		return NULL;
	}
	/*
	 * Each implementation constructor should set up the members of the
	 * frameserver instance, as well as return a pointer to itself. If it
	 * fails, it should return NULL without de-allocating the frameserver
	 * instance: that is the responsibility of this function.
	 */
	switch (t) {
#ifdef XRT_HAVE_FFMPEG
	case FRAMESERVER_TYPE_FFMPEG:
		internal = (frameserver_internal_instance_ptr)
		    ffmpeg_frameserver_create(i);
		break;
#endif // XRT_HAVE_FFMPEG
#ifdef XRT_HAVE_LIBUVC
	case FRAMESERVER_TYPE_UVC:
		internal =
		    (frameserver_internal_instance_ptr)uvc_frameserver_create(
		        i);
		break;
#endif // XRT_HAVE_LIBUVC
	case FRAMESERVER_TYPE_V4L2:
		// v4l2 is always compiled in on this path (see CMakeLists).
		internal =
		    (frameserver_internal_instance_ptr)v4l2_frameserver_create(
		        i);
		break;
	case FRAMESERVER_TYPE_NONE:
	default:
		free(i);
		return NULL;
		break;
	}
	if (internal == NULL) {
		/* Failed to allocate/create internal implementation */
		free(i);
		return NULL;
	}
	return i;
}
/*
 * Smoke test for the compiled-in frameserver backends.
 * Currently only exercises the ffmpeg backend (when built); the uvc and
 * v4l2 tests are disabled below. Always returns true.
 */
bool
frameservers_test()
{
#ifdef XRT_HAVE_FFMPEG
	ffmpeg_frameserver_test();
#endif // XRT_HAVE_FFMPEG
	// uvc_frameserver_test();
	// v4l2_frameserver_test();
	return true;
}
#ifndef FRAMESERVER_H
#define FRAMESERVER_H

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <math/m_api.h>
#include "../mt_events.h"

#ifdef __cplusplus
extern "C" {
#endif

#define MAX_PLANES 3 // this is what we see currently in e.g. RGB,YUV

// frame

/*! Pixel/stream formats a frameserver can deliver. */
typedef enum frame_format
{
	FORMAT_NONE,
	FORMAT_RAW,
	FORMAT_Y_UINT8,
	FORMAT_Y_UINT16,
	FORMAT_RGB_UINT8,
	FORMAT_BGR_UINT8,
	FORMAT_YUYV_UINT8,
	FORMAT_YUV444_UINT8,
	FORMAT_YUV422_UINT8,
	FORMAT_YUV420_UINT8,
	FORMAT_JPG
} frame_format_t;

/*! How the two stereo views are packed in one frame (side-by-side /
 * over-and-under), if at all. */
typedef enum stereo_format
{
	STEREO_NONE,
	STEREO_SBS,
	STEREO_OAU
} stereo_format_t;

/*! A single color/luma plane of a frame. */
typedef enum plane
{
	PLANE_NONE,
	PLANE_R,
	PLANE_G,
	PLANE_B,
	PLANE_Y,
	PLANE_U,
	PLANE_V
} plane_t;

typedef enum chroma_sampling
{
	CHROMA_SAMP_NONE,
	CHROMA_SAMP_444,
	CHROMA_SAMP_422,
	CHROMA_SAMP_411
} chroma_sampling_t;

typedef enum plane_layout
{
	PLANE_LAYOUT_COMPOSITE,
	PLANE_LAYOUT_SEPARATE
} plane_layout_t;

typedef enum sampling
{
	SAMPLING_NONE,
	SAMPLING_UPSAMPLED,
	SAMPLING_DOWNSAMPLED
} sampling_t;

// unnormalised pixel coordinates for clipping ROIs
typedef struct frame_rect
{
	struct xrt_vec2 tl;
	struct xrt_vec2 br;
} frame_rect_t;

// basic frame data structure - holds a pointer to buffer.
typedef struct frame
{
	uint16_t width;
	uint16_t height;
	uint16_t stride; // bytes per row of the data buffer
	frame_format_t format;
	stereo_format_t stereo_format;
	uint32_t size_bytes;
	uint8_t* data;
	chroma_sampling_t chroma_sampling; // unused
	plane_layout_t plane_layout;       // unused
	uint8_t* u_data;                   // unused
	uint8_t* v_data;                   // unused
	uint64_t timestamp;
	uint64_t source_timestamp;
	uint64_t source_sequence; // sequence id
	uint64_t source_id;       // used to tag frames with the source they
	                          // originated from
} frame_t;

typedef struct capture_parameters
{
	// used to configure cameras. since there is no guarantee every
	// frameserver will support any/all of these params, a 'best effort'
	// should be made to apply them. all numeric values are normalised
	// floats for broad applicability
	float gain;
	float exposure;
} capture_parameters_t;

// frameserver

typedef enum frameserver_type
{
	FRAMESERVER_TYPE_NONE,
	FRAMESERVER_TYPE_FFMPEG,
	FRAMESERVER_TYPE_UVC,
	FRAMESERVER_TYPE_V4L2
} frameserver_type_t;

// Interface types
typedef struct frameserver_internal_instance* frameserver_internal_instance_ptr;
typedef void* frameserver_source_descriptor_ptr;
typedef struct _frameserver_instance frameserver_instance_t;

/*! Callback invoked for every captured frame. */
typedef void (*frame_consumer_callback_func)(frameserver_instance_t* instance,
                                             frame_t* frame);

// Frameserver API

/*!
 * Generic frameserver instance: a vtable of backend entry points plus an
 * opaque pointer to the backend's internal state.
 */
typedef struct _frameserver_instance
{
	frameserver_type_t frameserver_type;
	/*!
	 * Enumerate all available sources.
	 */
	bool (*frameserver_enumerate_sources)(
	    frameserver_instance_t* inst,
	    frameserver_source_descriptor_ptr sources,
	    uint32_t* count);
	/*!
	 * Apply capture parameters (gain/exposure), best-effort.
	 */
	bool (*frameserver_configure_capture)(frameserver_instance_t*,
	                                      capture_parameters_t cp);
	/*!
	 * Fetch a single frame.
	 */
	bool (*frameserver_frame_get)(frameserver_instance_t* inst,
	                              frame_t* _frame);
	/*!
	 * Register the consumer called for every captured frame.
	 */
	void (*frameserver_register_frame_callback)(
	    frameserver_instance_t* inst,
	    void* target_instance,
	    frame_consumer_callback_func target_func);
	/*!
	 * Register the consumer called for driver events.
	 */
	void (*frameserver_register_event_callback)(
	    frameserver_instance_t* inst,
	    void* target_instance,
	    event_consumer_callback_func target_func);
	/*!
	 * Seek to a timestamp (file-backed sources).
	 */
	bool (*frameserver_seek)(frameserver_instance_t* inst,
	                         uint64_t timestamp);
	/*!
	 * Start streaming from the given source.
	 */
	bool (*frameserver_stream_start)(
	    frameserver_instance_t* inst,
	    frameserver_source_descriptor_ptr source);
	/*!
	 * Stop streaming.
	 */
	bool (*frameserver_stream_stop)(frameserver_instance_t* inst);
	/*!
	 * Whether the stream is currently running.
	 */
	bool (*frameserver_is_running)(frameserver_instance_t* inst);
	/*!
	 * Backend-private state.
	 */
	frameserver_internal_instance_ptr internal_instance;
} frameserver_instance_t;

/*! Create a frameserver of the given type; NULL on failure. */
frameserver_instance_t*
frameserver_create(frameserver_type_t t);

bool
frameserver_destroy(frameserver_instance_t* inst);

// bool frame_data_alloc(frame_t*);
// bool frame_data_free(frame_t*);

/*! Total data size of a frame in bytes, or -1 if not computable. */
int32_t
frame_size_in_bytes(frame_t* f);

int32_t
frame_bytes_per_pixel(frame_t* f);

float
format_bytes_per_pixel(
    frame_format_t f); // this is a float to support e.g. YUV420

bool
frame_split_stereo(frame_t* source, frame_t* left, frame_t* right);

bool
frame_extract_plane(frame_t* source, plane_t plane, frame_t* out);

bool
frame_resample(frame_t* source, frame_t* out);

bool
frameservers_test();

#ifdef __cplusplus
}
#endif

#endif // FRAMESERVER_H
#ifndef FFMPEG_FRAMESERVER_H
#define FFMPEG_FRAMESERVER_H

/* Frameserver backend that decodes video files via ffmpeg/libavcodec,
 * mainly useful for feeding recorded footage into the tracking pipeline.
 * (The previous comment here was copy-pasted from the uvc header.)
 */
#include <stdint.h>
#include <stdio.h>
#include "../common/frameserver.h"

/*! Describes one playable video source (a file) and its stream properties. */
typedef struct ffmpeg_source_descriptor
{
	char name[128];
	char* filepath;
	uint64_t source_id;
	uint32_t current_frame;
	uint32_t frame_count;
	frame_format_t format;
	uint32_t width;
	uint32_t height;
} ffmpeg_source_descriptor_t;

typedef struct ffmpeg_frameserver_instance ffmpeg_frameserver_instance_t;

/*! Construct the ffmpeg backend for a generic frameserver instance. */
ffmpeg_frameserver_instance_t*
ffmpeg_frameserver_create(frameserver_instance_t* inst);

bool
ffmpeg_frameserver_destroy(ffmpeg_frameserver_instance_t* inst);

/*! @todo are these candidates for static? are they unused? */
bool
ffmpeg_source_create(ffmpeg_source_descriptor_t* desc);

bool
ffmpeg_source_destroy(ffmpeg_source_descriptor_t* desc);

bool
ffmpeg_frameserver_test();

#endif // FFMPEG_FRAMESERVER_H
This diff is collapsed.
#ifndef UVC_FRAMESERVER_H
#define UVC_FRAMESERVER_H

#ifdef XRT_HAVE_LIBUVC

/* Almost all of the ground covered here would be covered
 * by the v4l2 frameserver on linux, but uvc may be the
 * simplest approach for cross-platform e.g. OS X
 */

#include <stdint.h>
#include <stdio.h>

#include "../common/frameserver.h"
#include <libuvc/libuvc.h>
#include <pthread.h>

// we need this to do a bit of hackery with multiple opens/closes
// NOTE(review): this re-declares libuvc's private struct uvc_context so its
// members can be reached — fragile against libuvc version changes; confirm
// the layout matches the libuvc version in use.
struct uvc_context
{
	/** Underlying context for USB communication */
	struct libusb_context* usb_ctx;
	/** True if libuvc initialized the underlying USB context */
	uint8_t own_usb_ctx;
	/** List of open devices in this context */
	uvc_device_handle_t* open_devices;
	pthread_t handler_thread;
	int kill_handler_thread;
};

// TODO: unify device descriptors across apis
/*! Describes one UVC camera mode (device + format + resolution + rate). */
typedef struct uvc_source_descriptor
{
	char name[128];
	uint16_t vendor_id;
	uint16_t product_id;
	char serial[128];
	uint64_t source_id;
	uint32_t uvc_device_index;
	enum uvc_frame_format stream_format;
	frame_format_t format;
	sampling_t sampling;
	uint32_t width;
	uint32_t height;
	uint32_t rate;
} uvc_source_descriptor_t;

/*! Backend-private state for the UVC frameserver. */
typedef struct uvc_frameserver_instance
{
	uvc_device_t** device_list;
	uvc_context_t* context;
	uvc_device_t* device;
	uvc_device_handle_t* device_handle;
	uvc_stream_handle_t* stream_handle;
	uvc_stream_ctrl_t stream_ctrl;
	frame_consumer_callback_func frame_target_callback;
	event_consumer_callback_func event_target_callback;
	void* frame_target_instance; // where we send our frames
	void* event_target_instance; // where we send our events
	uvc_source_descriptor_t source_descriptor;
	pthread_t stream_thread;
	capture_parameters_t capture_params;
	bool is_configured;
	bool is_running;
	uint32_t sequence_counter;
} uvc_frameserver_instance_t;

/*! Construct the UVC backend for a generic frameserver instance. */
uvc_frameserver_instance_t*
uvc_frameserver_create(frameserver_instance_t* inst);

bool
uvc_frameserver_destroy(frameserver_instance_t* inst);

bool
uvc_source_alloc(uvc_source_descriptor_t* desc);

bool
uvc_source_destroy(uvc_source_descriptor_t* desc);

bool
uvc_frameserver_configure_capture(frameserver_instance_t* inst,
                                  capture_parameters_t cp);

bool
uvc_frameserver_enumerate_sources(
    frameserver_instance_t*,
    frameserver_source_descriptor_ptr sources_generic,
    uint32_t* count);

bool
uvc_frameserver_get(frameserver_instance_t* inst, frame_t* _frame);

void
uvc_frameserver_register_event_callback(
    frameserver_instance_t* inst,
    void* target_instance,
    event_consumer_callback_func target_func);

void
uvc_frameserver_register_frame_callback(
    frameserver_instance_t* inst,
    void* target_instance,
    frame_consumer_callback_func target_func);

bool
uvc_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp);

bool
uvc_frameserver_stream_start(frameserver_instance_t* inst,
                             frameserver_source_descriptor_ptr source_generic);

bool
uvc_frameserver_stream_stop(frameserver_instance_t* inst);

bool
uvc_frameserver_is_running(frameserver_instance_t* inst);

bool
uvc_frameserver_test();

#endif // XRT_HAVE_LIBUVC

#endif // UVC_FRAMESERVER_H
This diff is collapsed.
#pragma once

/* V4L2-based frameserver backend: captures from Video4Linux2 devices. */

#define NUM_V4L2_BUFFERS 2

#include <stdint.h>
#include <stdio.h>

#include "../common/frameserver.h"
#include <linux/types.h>
#include <linux/videodev2.h>
#include <linux/v4l2-common.h>
#include <pthread.h>

// TODO: unify device descriptors across apis
/*! Describes one V4L2 capture mode (device node + format + resolution). */
typedef struct v4l2_source_descriptor
{
	char device_path[256]; // TODO: might not be enough
	char name[128];
	char model[128];
	uint64_t source_id;
	frame_format_t format;
	uint32_t stream_format;
	sampling_t sampling;
	uint32_t width;
	uint32_t height;
	uint32_t rate;
	uint8_t extended_format;
	uint32_t crop_scanline_bytes_start; // byte offset - special case for
	                                    // ps4 camera
	uint32_t crop_width; // pixels - special case for ps4 camera
} v4l2_source_descriptor_t;

/*! Backend-private state for the V4L2 frameserver. */
typedef struct v4l2_frameserver_instance
{
	frame_consumer_callback_func frame_target_callback;
	event_consumer_callback_func event_target_callback;
	void* frame_target_instance; // where we send our frames
	void* event_target_instance; // where we send our events
	v4l2_source_descriptor_t source_descriptor;
	pthread_t stream_thread;
	capture_parameters_t capture_params;
	bool is_configured;
	bool is_running;
} v4l2_frameserver_instance_t;

/*! Construct the V4L2 backend for a generic frameserver instance. */
v4l2_frameserver_instance_t*
v4l2_frameserver_create(frameserver_instance_t* inst);

bool
v4l2_frameserver_destroy(frameserver_instance_t* inst);

bool
v4l2_frameserver_source_create(v4l2_source_descriptor_t*);

bool
v4l2_frameserver_source_destroy(v4l2_source_descriptor_t*);

bool
v4l2_frameserver_configure_capture(frameserver_instance_t* inst,
                                   capture_parameters_t cp);

bool
v4l2_frameserver_enumerate_sources(
    frameserver_instance_t* inst,
    frameserver_source_descriptor_ptr sources_generic,
    uint32_t* count);

bool
v4l2_frameserver_get(frameserver_instance_t* inst, frame_t* frame);

void
v4l2_frameserver_register_event_callback(
    frameserver_instance_t* inst,
    void* target_instance,
    event_consumer_callback_func target_func);

void
v4l2_frameserver_register_frame_callback(
    frameserver_instance_t* inst,
    void* target_instance,
    frame_consumer_callback_func target_func);

bool
v4l2_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp);

bool
v4l2_frameserver_stream_start(frameserver_instance_t* inst,
                              frameserver_source_descriptor_ptr source_generic);

bool
v4l2_frameserver_stream_stop(frameserver_instance_t* inst);

bool
v4l2_frameserver_is_running(frameserver_instance_t* inst);

bool
v4l2_frameserver_test();
This diff is collapsed.
// Copyright 2019, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
 * @file
 * @brief Interface to internal Monado driver code.
 * @author Pete Black <pete.black@collabora.com>
 */

#pragma once

#include "math/m_api.h"
#include "xrt/xrt_device.h"
#include "optical_tracking/common/tracker.h"
#include "frameservers/common/frameserver.h"
#include "filters/common/filter.h"

#ifdef __cplusplus
extern "C" {
#endif

/*! The montrack device: an xrt_device that couples one or more frameservers
 * to a tracker and a filter. */
typedef struct mt_device
{
	struct xrt_device base;
	frameserver_instance_t* frameservers[MAX_FRAMESERVERS];
	uint32_t frameserver_count;
	tracker_instance_t* tracker;
	// TODO: merge these configurations to be descriptive of
	// n-source trackers
	tracker_mono_configuration_t config_mono;
	tracker_stereo_configuration_t config_stereo;
	filter_instance_t* filter;
	bool log_verbose;
	bool log_debug;
} mt_device_t;

/*! Downcast an xrt_device to the montrack device. */
XRT_MAYBE_UNUSED static inline mt_device_t*
mt_device(struct xrt_device* xdev)
{
	return (mt_device_t*)xdev;
}

/*! Create a montrack device by named configuration; NULL on failure. */
mt_device_t*
mt_device_create(char* device_name, bool log_verbose, bool log_debug);

// Per-hardware setup helpers, selected by mt_device_create():
bool
mt_create_mono_ps3eye(mt_device_t* md); // mono blob tracker, ps3 60fps camera

bool
mt_create_mono_c270(
    mt_device_t* md); // mono blob tracker, logitech 30fps c270 camera

bool
mt_create_stereo_elp(
    mt_device_t* md); // stereo tracker, ELP 60fps stereo camera

bool
mt_create_uvbi_elp(mt_device_t* md); // uvbi tracker, ELP 60fps stereo camera

bool
mt_create_uvbi_hdk(mt_device_t* md); // uvbi tracker, OSVR HDK 100fps IR camera

bool
mt_create_stereo_ps4(
    mt_device_t* md); // stereo tracker, PS4 60fps stereo camera

/*! Dispatch a driver event (frame arrival etc.) to the device. */
void
mt_handle_event(mt_device_t* md, driver_event_t e);

void
dummy_init_mt_device(mt_device_t* md);

#ifdef __cplusplus
}
#endif
// NOTE(review): no include guard or #pragma once is visible for this header
// in this view — confirm against the full file and add one if missing, since
// it is included from multiple headers (frameserver.h, mt_device.h).

/*! Kinds of events the driver layer can emit. */
typedef enum driver_event_type
{
	EVENT_NONE,
	EVENT_FRAMESERVER_GOTFRAME,
	EVENT_TRACKER_RECONFIGURED
} driver_event_type_t;

typedef struct driver_event
{
	driver_event_type_t type;
	// extra data to go along with events
	// can be added here
} driver_event_t;

/*! Callback invoked for each delivered driver event. */
typedef void (*event_consumer_callback_func)(void* instance,
                                             driver_event_t event);
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mt_framequeue.h"
/*
 * Return the process-wide frame queue singleton, lazily allocating it on
 * first use. Exits the process on allocation failure.
 */
frame_queue_t*
get_frame_queue()
{
	static frame_queue_t* fq = NULL;
	// TODO: locking
	if (fq == NULL) {
		fq = malloc(sizeof(frame_queue_t));
		if (!fq) {
			// Fixed: diagnostics go to stderr, and a fatal OOM
			// must not exit(0) (which reports success).
			fprintf(stderr, "ERROR: could not malloc!\n");
			exit(EXIT_FAILURE);
		}
		frame_array_init(&fq->frames);
	}
	return fq;
}
/*
 * Find the newest frame from the given source, increment its refcount and
 * return it. Returns NULL if the source has no queued frames.
 */
frame_t*
frame_queue_ref_latest(frame_queue_t* fq, uint32_t source_id)
{
	// TODO: locking
	frame_t* ret = NULL;
	uint32_t selected_index = 0;
	uint64_t highest_seq = 0;
	for (uint32_t i = 0; i < fq->frames.size; i++) {
		frame_t* f = &fq->frames.items[i];
		if (f->source_id != source_id) {
			continue;
		}
		// Fixed: the original required source_sequence > 0 to ever
		// match, so a source whose only frame had sequence 0 always
		// returned NULL; accept the first match unconditionally.
		if (ret == NULL || f->source_sequence > highest_seq) {
			highest_seq = f->source_sequence;
			ret = f;
			selected_index = i;
		}
	}
	if (ret) {
		framedata_t* fd = &fq->frames.refdata[selected_index];
		fd->refcount++;
	}
	return ret;
}
/*
 * Release one reference on the queued frame matching f's source id and
 * sequence id. No-op if the frame is not found.
 */
void
frame_queue_unref(frame_queue_t* fq, frame_t* f)
{
	// TODO: locking
	for (uint32_t i = 0; i < fq->frames.size; i++) {
		frame_t* qf = &fq->frames.items[i];
		if (qf->source_id == f->source_id &&
		    qf->source_sequence == f->source_sequence) {
			framedata_t* fd = &fq->frames.refdata[i];
			// Fixed: guard against refcount underflow on a
			// mismatched/duplicate unref.
			if (fd->refcount > 0) {
				fd->refcount--;
			}
			// (source_id, source_sequence) identifies a frame, so
			// we can stop at the first match instead of scanning
			// the whole array.
			return;
		}
	}
}
/*
 * Drop every queued frame whose refcount has reached zero, then append the
 * new frame.
 */
void
frame_queue_add(frame_queue_t* fq, frame_t* f)
{
	// TODO: locking
	// Fixed: the original collected ascending indices and then deleted
	// them in order — once the first deletion shifts the array, the
	// remaining stored indices point at the wrong (or out-of-range)
	// entries. Iterating backwards deletes safely without any scratch
	// allocation (the unchecked malloc is gone too).
	for (uint32_t i = fq->frames.size; i > 0; i--) {
		framedata_t* fd = &fq->frames.refdata[i - 1];
		if (fd->refcount == 0) {
			frame_array_delete(&fq->frames, i - 1);
		}
	}
	frame_array_add(&fq->frames, f);
}
void frame_array_init(frame_array_t* fa)