From 1455fadb2227ac6c85be5af2df78507f22306579 Mon Sep 17 00:00:00 2001
From: Ryan Houdek
Date: Wed, 9 Mar 2022 11:45:36 -0800
Subject: [PATCH] intel: Support compiling on non-x86

While this does the bare minimum to get anv compiling on non-x86
platforms, it is a start.

The problem comes with anv sprinkling memory fences and cacheline
flushes around the codebase. How this should be done is
architecture-specific.

Theoretically memory fencing can be implemented with C11 and
atomic_thread_fence but this will be left as an exercise for the
future. No one is running these Intel dGPUs on non-x86 currently
anyway.
---
 src/intel/common/intel_clflush.h   | 42 ++++++++++++++++++++++++++++++
 src/intel/vulkan/anv_batch_chain.c |  8 +++---
 src/intel/vulkan/anv_device.c      |  4 +--
 3 files changed, 48 insertions(+), 6 deletions(-)

diff --git a/src/intel/common/intel_clflush.h b/src/intel/common/intel_clflush.h
index 7c6aaf8f2df0..b10931bb78e8 100644
--- a/src/intel/common/intel_clflush.h
+++ b/src/intel/common/intel_clflush.h
@@ -27,6 +27,7 @@
 #define CACHELINE_SIZE 64
 #define CACHELINE_MASK 63
 
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
 static inline void
 intel_clflush_range(void *start, size_t size)
 {
@@ -65,4 +66,45 @@ intel_invalidate_range(void *start, size_t size)
    __builtin_ia32_mfence();
 }
 
+static inline void
+intel_memory_fence(void)
+{
+   __builtin_ia32_mfence();
+}
+
+static inline void
+intel_clflush(void *start)
+{
+   __builtin_ia32_clflush(start);
+}
+#else
+/*
+ * Unsupported architecture doesn't get cacheline invalidation
+ */
+static inline void
+intel_clflush_range(void *start, size_t size)
+{
+}
+
+static inline void
+intel_flush_range(void *start, size_t size)
+{
+}
+
+static inline void
+intel_invalidate_range(void *start, size_t size)
+{
+}
+
+static inline void
+intel_memory_fence(void)
+{
+}
+
+static inline void
+intel_clflush(void *start)
+{
+}
+#endif
+
 #endif
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index 5790101fec08..dea804935139 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -1904,11 +1904,11 @@ setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
    }
 
    if (!device->info.has_llc) {
-      __builtin_ia32_mfence();
+      intel_memory_fence();
       for (uint32_t i = 0; i < num_cmd_buffers; i++) {
          u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
            for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
-               __builtin_ia32_clflush((*bbo)->bo->map + i);
+               intel_clflush((*bbo)->bo->map + i);
          }
       }
    }
@@ -1991,9 +1991,9 @@ setup_utrace_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue,
    }
 
    if (!device->info.has_llc) {
-      __builtin_ia32_mfence();
+      intel_memory_fence();
       for (uint32_t i = 0; i < flush->batch_bo->size; i += CACHELINE_SIZE)
-         __builtin_ia32_clflush(flush->batch_bo->map);
+         intel_clflush(flush->batch_bo->map + i);
    }
 
    execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index ddee1b2d8966..d751247f6cd9 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -4172,7 +4172,7 @@ VkResult anv_FlushMappedMemoryRanges(
       return VK_SUCCESS;
 
    /* Make sure the writes we're flushing have landed. */
-   __builtin_ia32_mfence();
+   intel_memory_fence();
 
    clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
 
@@ -4192,7 +4192,7 @@ VkResult anv_InvalidateMappedMemoryRanges(
    clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
 
    /* Make sure no reads get moved up above the invalidate. */
-   __builtin_ia32_mfence();
+   intel_memory_fence();
 
    return VK_SUCCESS;
 }
-- 
GitLab