Commit dc7ab724 authored by Manuel Stoeckl's avatar Manuel Stoeckl
Browse files

Make video compression support a build option

In the process, split out all code which uses ffmpeg into video.c .
An option to disable dmabuf support has been added but does not
yet do anything.
parent a6823eb1
......@@ -41,14 +41,14 @@ Build with meson[0]. A typical incantation is
Requirements:
* meson (>= 0.46 or possibly earlier. a backend for it (i.e, ninja) is
also needed)
* meson (>= 0.47. and its dependencies `ninja` and `pkg-config`)
* wayland (>= 1.15, to support absolute paths in WAYLAND_DISPLAY)
* wayland-protocols (>= 1.12, for the xdg-shell protocol, and others)
* libffi
* libgbm (from a recent version of mesa)
* libzstd
* liblz4
* ffmpeg (optional, >=3.1, for lossy video encoding)
* scdoc (optional, to generate a man page)
* sys/sdt.h (optional, to provide static tracepoints for profiling)
* ssh (runtime, OpenSSH >= 6.7, for Unix domain socket forwarding)
......
#!/bin/sh
clang-format -style=file --assume-filename=C -i \
util.h \
waypipe.c server.c handlers.c client.c util.c parsing.c dmabuf.c shadow.c mainloop.c interval.c \
waypipe.c server.c handlers.c client.c util.c parsing.c dmabuf.c shadow.c mainloop.c interval.c video.c \
test/diff_roundtrip.c test/damage_merge.c
......@@ -1294,7 +1294,9 @@ static void request_zwp_linux_buffer_params_v1_create(struct wl_client *client,
/* replace the format with something the driver can probably
* handle */
info.format = dmabuf_get_simple_format_for_plane(format, i);
if (params->nplanes == 1 && format == 0x34325258 &&
if (params->nplanes == 1 &&
video_supports_dmabuf_format(
format, info.modifier) &&
context->g->config->video_if_possible) {
// attempt video codec
info.using_video = true;
......
......@@ -925,6 +925,7 @@ int main_interface_loop(int chanfd, int progfd,
.disabled = config->no_gpu};
setup_translation_map(&g.map, display_side, config->compression);
init_message_tracker(&g.tracker);
setup_video_logging();
while (!shutdown_flag) {
struct pollfd *pfds = NULL;
......
......@@ -2,7 +2,7 @@ project(
'waypipe',
'c',
license: 'MIT/Expat',
meson_version: '>=0.46.0',
meson_version: '>=0.47.0',
default_options: [
'c_std=c11',
'warning_level=2',
......@@ -41,22 +41,29 @@ wayland_client = dependency('wayland-client', version: '>=1.15') # WAYLAND_DISPL
wayland_server = dependency('wayland-server', version: '>=1.15') # WAYLAND_DISPLAY=path
wayland_protos = dependency('wayland-protocols', version: '>=1.12') # xdg-shell
wayland_scanner = dependency('wayland-scanner', native: true, version: '>=1.15') # public-code
libgbm = dependency('gbm')
libgbm = dependency('gbm', required : get_option('with_dmabuf'))
libdrm = dependency('libdrm', required : get_option('with_dmabuf'))
if libgbm.found() and libdrm.found()
add_project_arguments('-DHAS_DMABUF=1', language : 'c')
endif
libffi = dependency('libffi')
pthreads = dependency('threads')
rt = cc.find_library('rt')
if cc.has_header('sys/sdt.h')
add_project_arguments('-DHAS_USDT=1', language : 'c')
endif
liblz4 = dependency('liblz4')
libzstd = dependency('libzstd')
libavcodec = dependency('libavcodec')
libavutil = dependency('libavutil')
libswscale = dependency('libswscale')
pthreads = dependency('threads')
libavcodec = dependency('libavcodec', required : get_option('with_video'))
libavutil = dependency('libavutil', required : get_option('with_video'))
libswscale = dependency('libswscale', required : get_option('with_video'))
if libavcodec.found() and libavutil.found() and libswscale.found()
add_project_arguments('-DHAS_VIDEO=1', language : 'c')
endif
subdir('protocols')
waypipe_source_files = ['client.c', 'dmabuf.c', 'handlers.c', 'mainloop.c', 'parsing.c', 'server.c', 'shadow.c', 'interval.c', 'util.c']
waypipe_source_files = ['client.c', 'dmabuf.c', 'handlers.c', 'mainloop.c', 'parsing.c', 'server.c', 'shadow.c', 'interval.c', 'util.c', 'video.c']
waypipe_dependencies = [
libgbm, # General GPU buffer creation, aligned with dmabuf proto
libffi, # To call wayland protocol functions
......@@ -69,13 +76,21 @@ waypipe_dependencies = [
wayland_client # For wl_display_connect
]
waypipe_includes = []
if libdrm.found()
waypipe_includes += libdrm.get_pkgconfig_variable('includedir')
endif
executable(
	'waypipe',
	['waypipe.c'] + waypipe_source_files,
	dependencies : waypipe_dependencies,
	include_directories : waypipe_includes,
	install: true
)
scdoc = dependency('scdoc', version: '>=1.9.4', native: true, required: false)
if scdoc.found()
scdoc_prog = find_program(scdoc.get_pkgconfig_variable('scdoc'), native: true)
......@@ -94,7 +109,12 @@ if scdoc.found()
endif
# Testing
test_diff = executable('diff_roundtrip', ['test/diff_roundtrip.c'] + ['shadow.c', 'util.c', 'dmabuf.c', 'interval.c'], dependencies: waypipe_dependencies)
test_diff = executable(
'diff_roundtrip',
['test/diff_roundtrip.c'] + ['shadow.c', 'util.c', 'dmabuf.c', 'interval.c', 'video.c'],
include_directories : waypipe_includes,
dependencies: waypipe_dependencies
)
test('Whether diff operations successfully roundtrip', test_diff)
test_damage = executable('damage_merge', ['test/damage_merge.c'] + ['interval.c', 'util.c'])
test('If damage rectangles merge efficiently', test_damage)
......
option('with_video', type : 'feature', value : 'auto', description : 'Link with ffmpeg libraries and provide a command line option to display all buffers using a video stream')
option('with_dmabuf', type : 'feature', value : 'auto', description : 'Support DMABUFs, the file descriptors used to exchange data for e.g. OpenGL applications')
......@@ -38,13 +38,6 @@
#include <sys/stat.h>
#include <unistd.h>
#include <libavformat/avformat.h>
#include <libavutil/display.h>
#include <libavutil/hwcontext_drm.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libswscale/swscale.h>
#include <lz4frame.h>
#include <zstd.h>
......@@ -75,6 +68,9 @@ struct shadow_fd *get_shadow_for_rid(struct fd_translation_map *map, int rid)
static void destroy_unlinked_sfd(
struct fd_translation_map *map, struct shadow_fd *sfd)
{
/* video must be cleaned up before any buffers that it may rely on */
destroy_video_data(sfd);
if (sfd->type == FDC_FILE) {
munmap(sfd->file_mem_local, sfd->file_size);
free(sfd->mem_mirror);
......@@ -88,12 +84,6 @@ static void destroy_unlinked_sfd(
free(sfd->mem_mirror);
free(sfd->diff_buffer);
free(sfd->compress_buffer);
sws_freeContext(sfd->video_color_context);
av_frame_free(&sfd->video_reg_frame);
av_frame_free(&sfd->video_yuv_frame);
avcodec_free_context(&sfd->video_context);
av_packet_free(&sfd->video_packet);
free(sfd->video_buffer);
} else if (fdcat_ispipe(sfd->type)) {
......@@ -501,108 +491,10 @@ struct shadow_fd *translate_fd(struct fd_translation_map *map,
sfd->type = FDC_DMABUF;
if (info && info->using_video) {
// Try to set up a video encoding and a video decoding
// stream with AVCodec, although transmissions in each
// direction are relatively independent. TODO: use
// hardware support only if available.
struct AVCodec *codec =
avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
wp_log(WP_ERROR,
"Failed to find encoder for h264");
}
struct AVCodecContext *ctx =
avcodec_alloc_context3(codec);
struct AVPacket *pkt = av_packet_alloc();
ctx->bit_rate = 3000000;
// non-odd resolution ?
ctx->width = align((int)info->width, 8);
ctx->height = align((int)info->height, 8);
// "time" is only meaningful in terms of the frames
// provided
ctx->time_base = (AVRational){1, 25};
ctx->framerate = (AVRational){25, 1};
/* B-frames are directly tied to latency, since each one
* is predicted using its preceding and following
* frames. The gop size is chosen by the driver. */
ctx->gop_size = -1;
ctx->max_b_frames = 0; // Q: how to get this to zero?
ctx->pix_fmt = AV_PIX_FMT_YUV420P;
// low latency
ctx->delay = 0;
if (av_opt_set(ctx->priv_data, "preset", "ultrafast",
0) != 0) {
wp_log(WP_ERROR,
"Failed to set x264 encode ultrafast preset");
}
if (av_opt_set(ctx->priv_data, "tune", "zerolatency",
0) != 0) {
wp_log(WP_ERROR,
"Failed to set x264 encode zerolatency");
}
bool near_perfect = false;
if (near_perfect && av_opt_set(ctx->priv_data, "crf",
"0", 0) != 0) {
wp_log(WP_ERROR, "Failed to set x264 crf");
}
// option: crf = 0
if (avcodec_open2(ctx, codec, NULL) < 0) {
wp_log(WP_ERROR, "Failed to open codec");
}
struct AVFrame *frame = av_frame_alloc();
if (!frame) {
wp_log(WP_ERROR,
"Could not allocate video frame");
}
frame->format = AV_PIX_FMT_BGR0;
frame->width = ctx->width;
frame->height = ctx->height;
frame->linesize[0] = (int)info->strides[0];
struct AVFrame *yuv_frame = av_frame_alloc();
yuv_frame->width = ctx->width;
yuv_frame->height = ctx->height;
yuv_frame->format = AV_PIX_FMT_YUV420P;
if (av_image_alloc(yuv_frame->data, yuv_frame->linesize,
yuv_frame->width, yuv_frame->height,
AV_PIX_FMT_YUV420P, 64) < 0) {
wp_log(WP_ERROR,
"Failed to allocate temp image");
}
if (sws_isSupportedInput(AV_PIX_FMT_BGR0) == 0) {
wp_log(WP_ERROR,
"AV_PIX_FMT_BGR0 not supported");
}
if (sws_isSupportedInput(AV_PIX_FMT_YUV420P) == 0) {
wp_log(WP_ERROR,
"AV_PIX_FMT_YUV420P not supported");
}
struct SwsContext *sws = sws_getContext(ctx->width,
ctx->height, AV_PIX_FMT_BGR0,
ctx->width, ctx->height,
AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL,
NULL, NULL);
if (!sws) {
wp_log(WP_ERROR,
"Could not create software color conversion context");
}
sfd->video_codec = codec;
sfd->video_yuv_frame = yuv_frame;
sfd->video_reg_frame = frame;
sfd->video_packet = pkt;
sfd->video_context = ctx;
sfd->video_color_context = sws;
setup_video_encode(sfd, (int)info->width,
(int)info->height,
(int)info->strides[0],
(int)info->format);
}
}
return sfd;
......@@ -795,7 +687,7 @@ void apply_diff(size_t size, char *__restrict__ base, size_t diffsize,
}
}
static struct transfer *setup_single_block_transfer(int *ntransfers,
struct transfer *setup_single_block_transfer(int *ntransfers,
struct transfer transfers[], int *nblocks,
struct bytebuf blocks[], size_t size, const char *data)
{
......@@ -1050,101 +942,8 @@ void collect_update(struct fd_translation_map *map, struct shadow_fd *sfd,
if (sfd->dmabuf_info.using_video && sfd->video_context &&
sfd->video_reg_frame && sfd->video_packet) {
memcpy(sfd->mem_mirror, data, sfd->dmabuf_size);
sfd->video_reg_frame->data[0] =
(uint8_t *)sfd->mem_mirror;
for (int i = 1; i < AV_NUM_DATA_POINTERS; i++) {
sfd->video_reg_frame->data[i] = NULL;
}
av_frame_make_writable(sfd->video_yuv_frame);
if (sws_scale(sfd->video_color_context,
(const uint8_t *const *)sfd
->video_reg_frame
->data,
sfd->video_reg_frame->linesize, 0,
sfd->video_reg_frame->height,
sfd->video_yuv_frame->data,
sfd->video_yuv_frame->linesize) <
0) {
wp_log(WP_ERROR,
"Failed to perform color conversion");
}
sfd->video_yuv_frame->pts = sfd->video_frameno++;
int sendstat = avcodec_send_frame(sfd->video_context,
sfd->video_yuv_frame);
char errbuf[256];
strcpy(errbuf, "Unknown error");
if (sendstat < 0) {
av_strerror(sendstat, errbuf, sizeof(errbuf));
wp_log(WP_ERROR, "Failed to create frame: %s",
errbuf);
return;
}
// assume 1-1 frames to packets, at the moment
int recvstat = avcodec_receive_packet(
sfd->video_context, sfd->video_packet);
if (recvstat == AVERROR(EINVAL)) {
wp_log(WP_ERROR, "Failed to receive packet");
return;
} else if (recvstat == AVERROR(EAGAIN)) {
wp_log(WP_ERROR, "Packet needs more input");
// Clearly, the solution is to resend the
// original frame ? but _lag_
}
if (recvstat == 0) {
// we can unref the packet when? after sending?
// on the next arrival?
struct AVPacket *pkt = sfd->video_packet;
size_t tsize;
if (first) {
// For the first frame, we must prepend
// the video slice data
free(sfd->video_buffer);
sfd->video_buffer = calloc(
align(pkt->buf->size + sizeof(struct dmabuf_slice_data),
8),
1);
memcpy(sfd->video_buffer,
&sfd->dmabuf_info,
sizeof(struct dmabuf_slice_data));
memcpy(sfd->video_buffer + sizeof(struct dmabuf_slice_data),
pkt->buf->data,
pkt->buf->size);
tsize = pkt->buf->size +
sizeof(struct dmabuf_slice_data);
} else {
free(sfd->video_buffer);
size_t sz = pkt->buf->size;
sfd->video_buffer =
malloc(align(sz, 8));
memcpy(sfd->video_buffer,
pkt->buf->data, sz);
tsize = sz;
}
av_packet_unref(pkt);
struct transfer *tf = setup_single_block_transfer(
ntransfers, transfers, nblocks,
blocks, tsize,
sfd->video_buffer);
tf->type = sfd->type;
tf->obj_id = sfd->remote_id;
tf->special.file_actual_size =
(int)sfd->dmabuf_size;
} else if (first) {
struct transfer *tf = setup_single_block_transfer(
ntransfers, transfers, nblocks,
blocks,
sizeof(struct dmabuf_slice_data),
(const char *)&sfd
->dmabuf_info);
// Q: use a subtype 'FDC_VIDEODMABUF ?'
tf->type = sfd->type;
tf->obj_id = sfd->remote_id;
tf->special.file_actual_size =
(int)sfd->dmabuf_size;
}
collect_video_from_mirror(sfd, ntransfers, transfers,
nblocks, blocks, first);
return;
}
......@@ -1266,62 +1065,6 @@ void collect_update(struct fd_translation_map *map, struct shadow_fd *sfd,
}
}
static void apply_video_packet_to_mirror(
struct shadow_fd *sfd, size_t size, const char *data)
{
// We unpack directly one mem_mirror
sfd->video_reg_frame->data[0] = (uint8_t *)sfd->mem_mirror;
for (int i = 1; i < AV_NUM_DATA_POINTERS; i++) {
sfd->video_reg_frame->data[i] = NULL;
}
// padding, requires zerod overflow for read
sfd->video_packet->data = (uint8_t *)data;
sfd->video_packet->size = size;
int sendstat = avcodec_send_packet(
sfd->video_context, sfd->video_packet);
char errbuf[256];
strcpy(errbuf, "Unknown error");
if (sendstat < 0) {
av_strerror(sendstat, errbuf, sizeof(errbuf));
wp_log(WP_ERROR, "Failed to send packet: %s", errbuf);
}
while (true) {
// Apply all produced frames
int recvstat = avcodec_receive_frame(
sfd->video_context, sfd->video_yuv_frame);
if (recvstat == 0) {
if (sws_scale(sfd->video_color_context,
(const uint8_t *const *)sfd
->video_yuv_frame
->data,
sfd->video_yuv_frame->linesize, 0,
sfd->video_yuv_frame->height,
sfd->video_reg_frame->data,
sfd->video_reg_frame->linesize) <
0) {
wp_log(WP_ERROR,
"Failed to perform color conversion");
}
} else {
if (recvstat != AVERROR(EAGAIN)) {
char errbuf[256];
strcpy(errbuf, "Unknown error");
av_strerror(sendstat, errbuf, sizeof(errbuf));
wp_log(WP_ERROR,
"Failed to receive frame due to error: %s",
errbuf);
}
break;
}
// the scale/copy operation output is
// already onto mem_mirror
}
}
void create_from_update(struct fd_translation_map *map,
struct render_data *render, const struct transfer *transf)
{
......@@ -1445,66 +1188,10 @@ void create_from_update(struct fd_translation_map *map,
const char *contents = NULL;
size_t contents_size = sfd->dmabuf_size;
if (info->using_video) {
struct AVCodec *codec =
avcodec_find_decoder(AV_CODEC_ID_H264);
if (!codec) {
wp_log(WP_ERROR,
"Failed to find decoder for h264");
}
struct AVCodecContext *ctx =
avcodec_alloc_context3(codec);
struct AVPacket *pkt = av_packet_alloc();
// non-odd resolution ?
ctx->width = align(info->width, 8);
ctx->height = align(info->height, 8);
ctx->pix_fmt = AV_PIX_FMT_YUV420P;
ctx->delay = 0;
if (avcodec_open2(ctx, codec, NULL) < 0) {
wp_log(WP_ERROR, "Failed to open codec");
}
struct AVFrame *frame = av_frame_alloc();
if (!frame) {
wp_log(WP_ERROR,
"Could not allocate video frame");
}
frame->format = AV_PIX_FMT_BGR0;
frame->width = ctx->width;
frame->height = ctx->height;
frame->linesize[0] = info->strides[0];
if (sws_isSupportedInput(AV_PIX_FMT_BGR0) == 0) {
wp_log(WP_ERROR,
"AV_PIX_FMT_BGR0 not supported");
}
if (sws_isSupportedInput(AV_PIX_FMT_YUV420P) == 0) {
wp_log(WP_ERROR,
"AV_PIX_FMT_YUV420P not supported");
}
struct SwsContext *sws = sws_getContext(ctx->width,
ctx->height, AV_PIX_FMT_YUV420P,
ctx->width, ctx->height,
AV_PIX_FMT_BGR0, SWS_BILINEAR, NULL,
NULL, NULL);
if (!sws) {
wp_log(WP_ERROR,
"Could not create software color conversion context");
}
struct AVFrame *yuv_frame = av_frame_alloc();
yuv_frame->width = ctx->width;
yuv_frame->height = ctx->height;
yuv_frame->format = AV_PIX_FMT_YUV420P;
sfd->video_codec = codec;
sfd->video_reg_frame = frame;
sfd->video_yuv_frame = yuv_frame;
sfd->video_packet = pkt;
sfd->video_context = ctx;
sfd->video_color_context = sws;
setup_video_decode(sfd, (int)info->width,
(int)info->height,
(int)info->strides[0],
(int)info->format);
// Apply first frame, if available
if (block.size > sizeof(struct dmabuf_slice_data)) {
......
......@@ -477,6 +477,9 @@ void decref_transferred_fds(
struct fd_translation_map *map, int nfds, int fds[]);
void decref_transferred_rids(
struct fd_translation_map *map, int nids, int ids[]);
struct transfer *setup_single_block_transfer(int *ntransfers,
struct transfer transfers[], int *nblocks,
struct bytebuf blocks[], size_t size, const char *data);
// parsing.c
......@@ -542,6 +545,25 @@ int get_unique_dmabuf_handle(
struct render_data *rd, int fd, struct gbm_bo **temporary_bo);
uint32_t dmabuf_get_simple_format_for_plane(uint32_t format, int plane);
// video.c
/** Whether the video codepath can handle a dmabuf with the given DRM format
 * and modifier; always false when built without video support. */
bool video_supports_dmabuf_format(uint32_t format, uint64_t modifier);
/** Whether the video codepath can handle shm buffers of the given format;
 * always false when built without video support. */
bool video_supports_shm_format(uint32_t format);
/** set redirect for ffmpeg logging through wp_log */
void setup_video_logging(void);
/** Free any encoder/decoder state attached to `sfd`; must be called before
 * destroying the buffers that the video objects may rely on. */
void destroy_video_data(struct shadow_fd *sfd);
/** Set up a video encoding stream for the buffer shadowed by `sfd`. */
void setup_video_encode(struct shadow_fd *sfd, int width, int height,
		int stride, uint32_t drm_format);
/** Set up a video decoding stream for the buffer shadowed by `sfd`. */
void setup_video_decode(struct shadow_fd *sfd, int width, int height,
		int stride, uint32_t drm_format);
/** the video frame to be transferred should already have been transferred into
 * `sfd->mem_mirror`. */
void collect_video_from_mirror(struct shadow_fd *sfd, int *ntransfers,
		struct transfer transfers[], int *nblocks,
		struct bytebuf blocks[], bool first);
/** Decode the video packet in `data` and unpack the resulting frame onto
 * `sfd->mem_mirror`. */
void apply_video_packet_to_mirror(
		struct shadow_fd *sfd, size_t size, const char *data);
// exported for testing
void apply_diff(size_t size, char *__restrict__ base, size_t diffsize,
		const char *__restrict__ diff);
......
/*
* Copyright © 2019 Manuel Stoeckl
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define _XOPEN_SOURCE 700
#include "util.h"
#ifndef HAS_VIDEO
/* No-op stub for builds without video support; matches the (void) prototype
 * in util.h — an empty () parameter list is a non-prototype in C11. */
void setup_video_logging(void) {}
bool video_supports_dmabuf_format(uint32_t format, uint64_t modifier)
{
	/* Stub: with video support compiled out, no dmabuf format can be
	 * handled via the video codepath. */
	(void)format, (void)modifier;
	return false;
}
bool video_supports_shm_format(uint32_t format)
{
	/* Stub: video support was compiled out, so nothing is encodable. */
	(void)format;
	return false;
}
/* Stub destructor: no video state is ever allocated when HAS_VIDEO is off. */
void destroy_video_data(struct shadow_fd *sfd)
{
	(void)sfd;
}
void setup_video_encode(struct shadow_fd *sfd, int width, int height,
		int stride, uint32_t drm_format)
{
	/* Stub: encoder setup is a no-op without HAS_VIDEO. */
	(void)sfd;
	(void)width, (void)height, (void)stride, (void)drm_format;
}
void setup_video_decode(struct shadow_fd *sfd, int width, int height,
		int stride, uint32_t drm_format)
{
	/* Stub: decoder setup is a no-op without HAS_VIDEO. */
	(void)sfd;
	(void)width, (void)height, (void)stride, (void)drm_format;
}
void collect_video_from_mirror(struct shadow_fd *sfd, int *ntransfers,
		struct transfer transfers[], int *nblocks,
		struct bytebuf blocks[], bool first)
{
	/* Stub: emits no transfers or blocks when video support is
	 * compiled out. */
	(void)sfd, (void)ntransfers, (void)transfers;
	(void)nblocks, (void)blocks, (void)first;
}