diff --git a/src/intel/common/intel_clflush.h b/src/intel/common/intel_clflush.h
index 7c6aaf8f2df02a79d59e06ff2ddd43ded863e0c1..b10931bb78e8ba2ca8266730a5a6038a0d997f53 100644
--- a/src/intel/common/intel_clflush.h
+++ b/src/intel/common/intel_clflush.h
@@ -27,6 +27,7 @@
 #define CACHELINE_SIZE 64
 #define CACHELINE_MASK 63
 
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
 static inline void
 intel_clflush_range(void *start, size_t size)
 {
@@ -65,4 +66,47 @@ intel_invalidate_range(void *start, size_t size)
    __builtin_ia32_mfence();
 }
 
+/* Full memory barrier: orders all prior loads/stores before later ones. */
+static inline void
+intel_memory_fence(void)
+{
+   __builtin_ia32_mfence();
+}
+
+/* Flush the single cacheline containing `start` from the CPU caches. */
+static inline void
+intel_clflush(void *start)
+{
+   __builtin_ia32_clflush(start);
+}
+#else
+/*
+ * Unsupported architecture doesn't get cacheline invalidation
+ */
+static inline void
+intel_clflush_range(void *start, size_t size)
+{
+}
+
+static inline void
+intel_flush_range(void *start, size_t size)
+{
+}
+
+static inline void
+intel_invalidate_range(void *start, size_t size)
+{
+}
+
+static inline void
+intel_memory_fence(void)
+{
+}
+
+static inline void
+intel_clflush(void *start)
+{
+}
+#endif
+
 #endif
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index 5790101fec08f0cdd171c0f4c4aa82e0f27f913e..dea804935139715b926fcf515c2e7f4b80a3a778 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -1904,11 +1904,11 @@ setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
    }
 
    if (!device->info.has_llc) {
-      __builtin_ia32_mfence();
+      intel_memory_fence();
       for (uint32_t i = 0; i < num_cmd_buffers; i++) {
          u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
            for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
-               __builtin_ia32_clflush((*bbo)->bo->map + i);
+               intel_clflush((*bbo)->bo->map + i);
          }
       }
    }
@@ -1991,9 +1991,9 @@ setup_utrace_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue,
    }
 
    if (!device->info.has_llc) {
-      __builtin_ia32_mfence();
+      intel_memory_fence();
       for (uint32_t i = 0; i < flush->batch_bo->size; i += CACHELINE_SIZE)
-         __builtin_ia32_clflush(flush->batch_bo->map);
+         intel_clflush(flush->batch_bo->map + i);
    }
 
    execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index ddee1b2d8966cf5cc8c567a61d11955240985ef7..d751247f6cd9fd208c242590ab072dc9dbf6872b 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -4172,7 +4172,7 @@ VkResult anv_FlushMappedMemoryRanges(
       return VK_SUCCESS;
 
    /* Make sure the writes we're flushing have landed. */
-   __builtin_ia32_mfence();
+   intel_memory_fence();
 
    clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
 
@@ -4192,7 +4192,7 @@ VkResult anv_InvalidateMappedMemoryRanges(
    clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
 
    /* Make sure no reads get moved up above the invalidate. */
-   __builtin_ia32_mfence();
+   intel_memory_fence();
 
    return VK_SUCCESS;
 }