Commit c9afd8cb authored by Mario Kleiner, committed by Adam Jackson
Browse files

modesetting: Fix and improve ms_kernel_msc_to_crtc_msc()



The old 32-Bit wraparound handling didn't actually work, due to some
integer casting bug, and the mapping was ill equipped to deal with input
from the new true 64-bit GetCrtcSequence/QueueCrtcSequence api's
introduced in Linux 4.15.

For 32-Bit truncated input from pageflip events and old vblank events
and old drmWaitVblank ioctl, implement new wraparound handling, which
also allows to deal with wraparound in the other direction, e.g., if a
32-Bit truncated sequence value is passed in, whose true 64-Bit
in-kernel hw value is within 2^30 counts of the previous processed
value, but whose 32-bit truncated sequence value happens to lie just
above or below a 2^32 boundary, i.e., one of the two values 'sequence'
vs. 'msc_prev' lies above a 2^32 border, the other one below it.

The method is directly translated from Mesa's proven implementation of
the INTEL_swap_events extension, where a true underlying 64-Bit wide
swapbuffers count (SBC) needs to get reconstructed from a 32-Bit LSB
truncated SBC transported over the X11 protocol wire. Same conditions
apply, i.e., successive true 64-Bit SBC values are close to each other,
but don't always get received in strictly monotonically increasing
order. See Mesa commit cc5ddd584d17abd422ae4d8e83805969485740d9 ("glx:
Handle out-of-sequence swap completion events correctly. (v2)") for
explanation.

Additionally add a separate path for true 64-bit msc input originating
from Linux 4.15+ drmCrtcGetSequence/QueueSequence ioctl's and
corresponding 64-bit vblank events. True 64-bit msc's don't need
remapping and must be passed through.

As a reliability bonus, they are also used here to update the tracking
values msc_prev and ms_high with perfect 64-Bit ground truth as baseline
for mapping msc from pageflip completion events, because pageflip events
are always 32-bit wide, even when the new kernel api's are used. Because
each pageflip(-event) is always preceded close in time (and vblank
count) by a drmCrtcQueueSequence queued event or drmCrtcGetSequence
query as part of DRI2 or DRI3+Present swap scheduling, we can be certain
that each pageflip event will get its truncated 32-bit msc remapped
reliably to the true 64-bit msc of flip completion whenever the sequence
api is available, i.e., on Linux 4.15 or later.

Note: In principle at least the 32-bit mapping path could also be
backported to earlier server branches, as this seems to be broken for at
least server 1.16 to 1.19.
Signed-off-by: Mario Kleiner <mario.kleiner.de@gmail.com>
Reviewed-by: Adam Jackson <ajax@redhat.com>
Cc: Keith Packard <keithp@keithp.com>
Cc: Michel Dänzer <michel.daenzer@amd.com>
parent 73f0ed2d
......@@ -149,7 +149,7 @@ xf86CrtcPtr ms_dri2_crtc_covering_drawable(DrawablePtr pDraw);
int ms_get_crtc_ust_msc(xf86CrtcPtr crtc, CARD64 *ust, CARD64 *msc);
uint64_t ms_kernel_msc_to_crtc_msc(xf86CrtcPtr crtc, uint64_t sequence);
uint64_t ms_kernel_msc_to_crtc_msc(xf86CrtcPtr crtc, uint64_t sequence, Bool is64bit);
Bool ms_dri2_screen_init(ScreenPtr screen);
......
......@@ -239,7 +239,7 @@ ms_queue_vblank(xf86CrtcPtr crtc, ms_queue_flag flags,
drm_flags, msc, &kernel_queued, seq);
if (ret == 0) {
if (msc_queued)
*msc_queued = ms_kernel_msc_to_crtc_msc(crtc, kernel_queued);
*msc_queued = ms_kernel_msc_to_crtc_msc(crtc, kernel_queued, TRUE);
ms->has_queue_sequence = TRUE;
return TRUE;
}
......@@ -262,7 +262,7 @@ ms_queue_vblank(xf86CrtcPtr crtc, ms_queue_flag flags,
ret = drmWaitVBlank(ms->fd, &vbl);
if (ret == 0) {
if (msc_queued)
*msc_queued = ms_kernel_msc_to_crtc_msc(crtc, vbl.reply.sequence);
*msc_queued = ms_kernel_msc_to_crtc_msc(crtc, vbl.reply.sequence, FALSE);
return TRUE;
}
check:
......@@ -275,28 +275,59 @@ ms_queue_vblank(xf86CrtcPtr crtc, ms_queue_flag flags,
}
/**
* Convert a 32-bit kernel MSC sequence number to a 64-bit local sequence
* number, adding in the high 32 bits, and dealing with 64-bit wrapping.
* Convert a 32-bit or 64-bit kernel MSC sequence number to a 64-bit local
* sequence number, adding in the high 32 bits, and dealing with 32-bit
* wrapping if needed.
*/
uint64_t
ms_kernel_msc_to_crtc_msc(xf86CrtcPtr crtc, uint64_t sequence)
ms_kernel_msc_to_crtc_msc(xf86CrtcPtr crtc, uint64_t sequence, Bool is64bit)
{
drmmode_crtc_private_rec *drmmode_crtc = crtc->driver_private;
if ((int32_t) (sequence - drmmode_crtc->msc_prev) < -0x40000000)
drmmode_crtc->msc_high += 0x100000000L;
if (!is64bit) {
/* sequence is provided as a 32 bit value from one of the 32 bit apis,
* e.g., drmWaitVBlank(), classic vblank events, or pageflip events.
*
* Track and handle 32-Bit wrapping, somewhat robust against occasional
* out-of-order not always monotonically increasing sequence values.
*/
if ((int64_t) sequence < ((int64_t) drmmode_crtc->msc_prev - 0x40000000))
drmmode_crtc->msc_high += 0x100000000L;
if ((int64_t) sequence > ((int64_t) drmmode_crtc->msc_prev + 0x40000000))
drmmode_crtc->msc_high -= 0x100000000L;
drmmode_crtc->msc_prev = sequence;
return drmmode_crtc->msc_high + sequence;
}
/* True 64-Bit sequence from Linux 4.15+ 64-Bit drmCrtcGetSequence /
* drmCrtcQueueSequence apis and events. Pass through sequence unmodified,
* but update the 32-bit tracking variables with reliable ground truth.
*
* With 64-Bit api in use, the only !is64bit input is from pageflip events,
* and any pageflip event is usually preceeded by some is64bit input from
* swap scheduling, so this should provide reliable mapping for pageflip
* events based on true 64-bit input as baseline as well.
*/
drmmode_crtc->msc_prev = sequence;
return drmmode_crtc->msc_high + sequence;
drmmode_crtc->msc_high = sequence & 0xffffffff00000000;
return sequence;
}
/**
 * Fetch the current UST/MSC pair for a CRTC.
 *
 * @param crtc  CRTC to query.
 * @param ust   Out: microsecond timestamp of the most recent vblank.
 * @param msc   Out: 64-bit media stream counter for that vblank.
 * @return      Success, or BadMatch if the kernel query failed.
 */
int
ms_get_crtc_ust_msc(xf86CrtcPtr crtc, CARD64 *ust, CARD64 *msc)
{
    ScreenPtr screen = crtc->randr_crtc->pScreen;
    ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
    modesettingPtr ms = modesettingPTR(scrn);
    uint64_t kernel_msc;

    if (!ms_get_kernel_ust_msc(crtc, &kernel_msc, ust))
        return BadMatch;

    /* If the 64-bit queue-sequence api is in use, kernel_msc is presumably
     * already a true 64-bit value and passes through unmodified; otherwise
     * it is 32-bit truncated and needs wrap handling. */
    *msc = ms_kernel_msc_to_crtc_msc(crtc, kernel_msc, ms->has_queue_sequence);
    return Success;
}
......@@ -416,7 +447,7 @@ ms_drm_abort(ScrnInfoPtr scrn, Bool (*match)(void *data, void *match_data),
* drm event queue and calls the handler for it.
*/
static void
ms_drm_sequence_handler(int fd, uint64_t frame, uint64_t ns, uint64_t user_data)
ms_drm_sequence_handler(int fd, uint64_t frame, uint64_t ns, Bool is64bit, uint64_t user_data)
{
struct ms_drm_queue *q, *tmp;
uint32_t seq = (uint32_t) user_data;
......@@ -425,7 +456,7 @@ ms_drm_sequence_handler(int fd, uint64_t frame, uint64_t ns, uint64_t user_data)
if (q->seq == seq) {
uint64_t msc;
msc = ms_kernel_msc_to_crtc_msc(q->crtc, frame);
msc = ms_kernel_msc_to_crtc_msc(q->crtc, frame, is64bit);
xorg_list_del(&q->list);
q->handler(msc, ns / 1000, q->data);
free(q);
......@@ -434,11 +465,20 @@ ms_drm_sequence_handler(int fd, uint64_t frame, uint64_t ns, uint64_t user_data)
}
}
/* Event callback for drmCrtcQueueSequence completions (Linux 4.15+).
 * Trampoline to the common handler, flagging the input as 64-bit. */
static void
ms_drm_sequence_handler_64bit(int fd, uint64_t frame, uint64_t ns, uint64_t user_data)
{
    /* frame is a true 64-bit count carried in a 64-bit argument, so no
     * wraparound reconstruction is needed downstream. */
    ms_drm_sequence_handler(fd, frame, ns, TRUE, user_data);
}
/* Event callback for classic 32-bit vblank and pageflip DRM events.
 * Converts the kernel's sec/usec timestamp to nanoseconds and forwards to
 * the common handler, flagging the frame count as 32-bit truncated so it
 * gets remapped to a full 64-bit MSC. */
static void
ms_drm_handler(int fd, uint32_t frame, uint32_t sec, uint32_t usec,
               void *user_ptr)
{
    /* frame is a 32-bit wrapped count widened into a 64-bit argument */
    ms_drm_sequence_handler(fd, frame, ((uint64_t) sec * 1000000 + usec) * 1000,
                            FALSE, (uint32_t) (uintptr_t) user_ptr);
}
Bool
......@@ -452,7 +492,7 @@ ms_vblank_screen_init(ScreenPtr screen)
ms->event_context.version = 4;
ms->event_context.vblank_handler = ms_drm_handler;
ms->event_context.page_flip_handler = ms_drm_handler;
ms->event_context.sequence_handler = ms_drm_sequence_handler;
ms->event_context.sequence_handler = ms_drm_sequence_handler_64bit;
/* We need to re-register the DRM fd for the synchronisation
* feedback on every server generation, so perform the
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment