/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <xf86drm.h>
#include <drm_fourcc.h>

#include "anv_private.h"
#include "util/strtod.h"
#include "util/debug.h"
#include "util/build_id.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "vk_util.h"
#include "common/gen_defines.h"

#include "genxml/gen7_pack.h"

static void
compiler_debug_log(void *data, const char *fmt, ...)
{ }

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      intel_logd_v(fmt, args);

   va_end(args);
}

static VkResult
anv_compute_heap_size(int fd, uint64_t gtt_size, uint64_t *heap_size)
{
   /* Query the total ram from the system */
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU.  If the user has 4GiB
    * or less, we use at most half.  If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   /* We also want to leave some padding for things we allocate in the driver,
    * so don't go over 3/4 of the GTT either.
    */
   uint64_t available_gtt = gtt_size * 3 / 4;

   *heap_size = MIN2(available_ram, available_gtt);
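   /* Worked example (illustrative, not part of the original source): with
    * 16 GiB of system RAM and a 256 GiB GTT, available_ram is 12 GiB and
    * available_gtt is 192 GiB, so the heap is sized to 12 GiB.
    */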

   return VK_SUCCESS;
}

static VkResult
anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
{
   uint64_t gtt_size;
   if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
                                 &gtt_size) == -1) {
      /* If, for whatever reason, we can't actually get the GTT size from the
       * kernel (too old?) fall back to the aperture size.
       */
      anv_perf_warn(NULL, NULL,
                    "Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");

      if (anv_gem_get_aperture(fd, &gtt_size) == -1) {
         return vk_errorf(NULL, NULL, VK_ERROR_INITIALIZATION_FAILED,
                          "failed to get aperture size: %m");
      }
   }

   device->supports_48bit_addresses = (device->info.gen >= 8) &&
      gtt_size > (4ULL << 30 /* GiB */);

   uint64_t heap_size = 0;
   VkResult result = anv_compute_heap_size(fd, gtt_size, &heap_size);
   if (result != VK_SUCCESS)
      return result;

   if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
      /* When running with an overridden PCI ID, we may get a GTT size from
       * the kernel that is greater than 2 GiB but the execbuf check for 48bit
       * address support can still fail.  Just clamp the address space size to
       * 2 GiB if we don't have 48-bit support.
       */
      intel_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
                        "not support for 48-bit addresses",
                        __FILE__, __LINE__);
      heap_size = 2ull << 30;
   }

   if (heap_size <= 3ull * (1ull << 30)) {
      /* In this case, everything fits nicely into the 32-bit address space,
       * so there's no need for supporting 48bit addresses on client-allocated
       * memory objects.
       */
      device->memory.heap_count = 1;
      device->memory.heaps[0] = (struct anv_memory_heap) {
         .size = heap_size,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .supports_48bit_addresses = false,
      };
   } else {
      /* Not everything will fit nicely into a 32-bit address space.  In this
       * case we need a 64-bit heap.  Advertise a small 32-bit heap and a
       * larger 48-bit heap.  If we're in this case, then we have a total heap
       * size larger than 3GiB which most likely means they have 8 GiB of
       * video memory and so carving off 1 GiB for the 32-bit heap should be
       * reasonable.
       */
      const uint64_t heap_size_32bit = 1ull << 30;
      const uint64_t heap_size_48bit = heap_size - heap_size_32bit;

      assert(device->supports_48bit_addresses);

      device->memory.heap_count = 2;
      device->memory.heaps[0] = (struct anv_memory_heap) {
         .size = heap_size_48bit,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .supports_48bit_addresses = true,
      };
      device->memory.heaps[1] = (struct anv_memory_heap) {
         .size = heap_size_32bit,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .supports_48bit_addresses = false,
      };
   }

   uint32_t type_count = 0;
   for (uint32_t heap = 0; heap < device->memory.heap_count; heap++) {
      uint32_t valid_buffer_usage = ~0;

      /* There appears to be a hardware issue in the VF cache where it only
       * considers the bottom 32 bits of memory addresses.  If you happen to
       * have two vertex buffers which get placed exactly 4 GiB apart and use
       * them in back-to-back draw calls, you can get collisions.  In order to
       * solve this problem, we require vertex and index buffers be bound to
       * memory allocated out of the 32-bit heap.
       */
      if (device->memory.heaps[heap].supports_48bit_addresses) {
         valid_buffer_usage &= ~(VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
                                 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
      }

      if (device->info.has_llc) {
         /* Big core GPUs share LLC with the CPU and thus one memory type can be
          * both cached and coherent at the same time.
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
            .valid_buffer_usage = valid_buffer_usage,
         };
      } else {
         /* The spec requires that we expose a host-visible, coherent memory
          * type, but Atom GPUs don't share LLC. Thus we offer two memory types
          * to give the application a choice: one type that is cached but not
          * coherent, and one that is coherent but uncached (write-combined).
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
            .heapIndex = heap,
            .valid_buffer_usage = valid_buffer_usage,
         };
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
            .valid_buffer_usage = valid_buffer_usage,
         };
      }
   }
   device->memory.type_count = type_count;
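   /* Net result (summary comment added for clarity): on LLC platforms each
    * heap gets a single cached+coherent memory type; on non-LLC platforms
    * each heap gets two types, one coherent but uncached (write-combined)
    * and one cached but not coherent.
    */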

   return VK_SUCCESS;
}

static VkResult
anv_physical_device_init_uuids(struct anv_physical_device *device)
{
   const struct build_id_note *note =
      build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
   if (!note) {
      return vk_errorf(device->instance, device,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to find build-id");
   }

   unsigned build_id_len = build_id_length(note);
   if (build_id_len < 20) {
      return vk_errorf(device->instance, device,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "build-id too short.  It needs to be a SHA");
   }

   memcpy(device->driver_build_sha1, build_id_data(note), 20);

   struct mesa_sha1 sha1_ctx;
   uint8_t sha1[20];
   STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));

   /* The pipeline cache UUID is used for determining when a pipeline cache is
    * invalid.  It needs both a driver build and the PCI ID of the device.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
   _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
                     sizeof(device->chipset_id));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);

   /* The driver UUID is used for determining sharability of images and memory
    * between two Vulkan instances in separate processes.  People who want to
    * share memory need to also check the device UUID (below) so all this
    * needs to be is the build-id.
    */
   memcpy(device->driver_uuid, build_id_data(note), VK_UUID_SIZE);

   /* The device UUID uniquely identifies the given device within the machine.
    * Since we never have more than one device, this doesn't need to be a real
    * UUID.  However, on the off-chance that someone tries to use this to
    * cache pre-tiled images or something of the like, we use the PCI ID and
    * some bits of ISL info to ensure that this is safe.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
                     sizeof(device->chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->isl_dev.has_bit6_swizzling,
                     sizeof(device->isl_dev.has_bit6_swizzling));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->device_uuid, sha1, VK_UUID_SIZE);

   return VK_SUCCESS;
}

static void
anv_physical_device_init_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   char renderer[9];
   MAYBE_UNUSED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
                                   device->chipset_id);
   assert(len == sizeof(renderer) - 1);

   char timestamp[41];
   _mesa_sha1_format(timestamp, device->driver_build_sha1);

   device->disk_cache = disk_cache_create(renderer, timestamp, 0);
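   /* Note (comment added for clarity): the cache is keyed on both strings
    * above, so entries are invalidated whenever the PCI ID ("renderer") or
    * the driver build-id ("timestamp") changes.
    */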
#else
   device->disk_cache = NULL;
#endif
}

static void
anv_physical_device_free_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   if (device->disk_cache)
      disk_cache_destroy(device->disk_cache);
#else
   assert(device->disk_cache == NULL);
#endif
}

static VkResult
anv_physical_device_init(struct anv_physical_device *device,
                         struct anv_instance *instance,
                         const char *primary_path,
                         const char *path)
{
   VkResult result;
   int fd;
   int master_fd = -1;

   brw_process_intel_debug_variable();

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;

   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   device->no_hw = getenv("INTEL_NO_HW") != NULL;

   const int pci_id_override = gen_get_pci_device_id_override();
   if (pci_id_override < 0) {
      device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
      if (!device->chipset_id) {
         result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
         goto fail;
      }
   } else {
      device->chipset_id = pci_id_override;
      device->no_hw = true;
   }

   device->name = gen_get_device_name(device->chipset_id);
   if (!gen_get_device_info(device->chipset_id, &device->info)) {
      result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
      goto fail;
   }

   if (device->info.is_haswell) {
      intel_logw("Haswell Vulkan support is incomplete");
   } else if (device->info.gen == 7 && !device->info.is_baytrail) {
      intel_logw("Ivy Bridge Vulkan support is incomplete");
   } else if (device->info.gen == 7 && device->info.is_baytrail) {
      intel_logw("Bay Trail Vulkan support is incomplete");
   } else if (device->info.gen >= 8 && device->info.gen <= 10) {
      /* Gen8-10 fully supported */
   } else if (device->info.gen == 11) {
      intel_logw("Vulkan is not yet fully supported on gen11.");
   } else {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INCOMPATIBLE_DRIVER,
                         "Vulkan not yet supported on %s", device->name);
      goto fail;
   }

   device->cmd_parser_version = -1;
   if (device->info.gen == 7) {
      device->cmd_parser_version =
         anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
      if (device->cmd_parser_version == -1) {
         result = vk_errorf(device->instance, device,
                            VK_ERROR_INITIALIZATION_FAILED,
                            "failed to get command parser version");
         goto fail;
      }
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing gem wait");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing execbuf2");
      goto fail;
   }

   if (!device->info.has_llc &&
       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing wc mmap");
      goto fail;
   }

   result = anv_physical_device_init_heaps(device, fd);
   if (result != VK_SUCCESS)
      goto fail;

   device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
   device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
   device->has_exec_fence = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE);
   device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
   device->has_syncobj_wait = device->has_syncobj &&
                              anv_gem_supports_syncobj_wait(fd);
   device->has_context_priority = anv_gem_has_context_priority(fd);

   device->use_softpin = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN)
      && device->supports_48bit_addresses;
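   /* Softpin (EXEC_OBJECT_PINNED) lets the driver assign GPU virtual
    * addresses itself instead of relying on kernel relocations; it is only
    * used here when the full 48-bit address space is available. (Comment
    * added for clarity.)
    */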

   device->has_context_isolation =
      anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);

   bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);

   /* Starting with Gen10, the timestamp frequency of the command streamer may
    * vary from one part to another. We can query the value from the kernel.
    */
   if (device->info.gen >= 10) {
      int timestamp_frequency =
         anv_gem_get_param(fd, I915_PARAM_CS_TIMESTAMP_FREQUENCY);

      if (timestamp_frequency < 0)
         intel_logw("Kernel 4.16-rc1+ required to properly query CS timestamp frequency");
      else
         device->info.timestamp_frequency = timestamp_frequency;
   }

   /* GENs prior to 8 do not support EU/Subslice info */
   if (device->info.gen >= 8) {
      device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
      device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);

      /* Without this information, we cannot get the right Braswell
       * brand strings, and we have to use conservative numbers for GPGPU on
       * many platforms, but otherwise, things will just work.
       */
      if (device->subslice_total < 1 || device->eu_total < 1) {
         intel_logw("Kernel 4.1 required to properly query GPU properties");
      }
   } else if (device->info.gen == 7) {
      device->subslice_total = 1 << (device->info.gt - 1);
   }

   if (device->info.is_cherryview &&
       device->subslice_total > 0 && device->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * num threads per EU */
      uint32_t max_cs_threads =
         device->eu_total / device->subslice_total * device->info.num_thread_per_eu;
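      /* For example (hypothetical configuration): a part with 16 EUs across
       * 2 subslices and 7 threads per EU yields 16 / 2 * 7 = 56 logical
       * compute threads.
       */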

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > device->info.max_cs_threads)
         device->info.max_cs_threads = max_cs_threads;
   }

   device->compiler = brw_compiler_create(NULL, &device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;
   device->compiler->supports_pull_constants = false;
   device->compiler->constant_buffer_0_is_relative =
      device->info.gen < 8 || !device->has_context_isolation;
   device->compiler->supports_shader_constants = true;

   isl_device_init(&device->isl_dev, &device->info, swizzled);

   result = anv_physical_device_init_uuids(device);
   if (result != VK_SUCCESS)
      goto fail;

   anv_physical_device_init_disk_cache(device);

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(primary_path, O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* prod the device with a GETPARAM call which will fail if
          * we don't have permission to even render on this device
          */
         if (anv_gem_get_param(master_fd, I915_PARAM_CHIPSET_ID) == 0) {
            close(master_fd);
            master_fd = -1;
         }
      }
   }
   device->master_fd = master_fd;
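   /* master_fd stays -1 when VK_KHR_display was not enabled or when the
    * primary node could not be opened with render permission. (Comment added
    * for clarity.)
    */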

   result = anv_init_wsi(device);
   if (result != VK_SUCCESS) {
      ralloc_free(device->compiler);
      anv_physical_device_free_disk_cache(device);
      goto fail;
   }

   anv_physical_device_get_supported_extensions(device,
                                                &device->supported_extensions);

   device->local_fd = fd;

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
anv_physical_device_finish(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   anv_physical_device_free_disk_cache(device);
   ralloc_free(device->compiler);
   close(device->local_fd);
   if (device->master_fd >= 0)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
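/* These plain malloc/realloc/free wrappers back VkAllocationCallbacks when
 * the application passes a NULL allocator; vk_alloc2()/vk_free2() below fall
 * back to default_alloc in that case. (Comment added for clarity.)
 */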

VkResult anv_EnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
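   /* VK_OUTARRAY_MAKE implements the usual Vulkan two-call idiom: with a NULL
    * pProperties only *pPropertyCount is written; otherwise up to
    * *pPropertyCount entries are filled and VK_INCOMPLETE is reported when
    * the array is too small. (Comment added for clarity.)
    */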

   for (int i = 0; i < ANV_INSTANCE_EXTENSION_COUNT; i++) {
      if (anv_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = anv_instance_extensions[i];
         }
      }
   }

   return vk_outarray_status(&out);
}

VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   struct anv_instance_extension_table enabled_extensions = {};
   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < ANV_INSTANCE_EXTENSION_COUNT; idx++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    anv_instance_extensions[idx].extensionName) == 0)
            break;
      }

      if (idx >= ANV_INSTANCE_EXTENSION_COUNT)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      if (!anv_instance_extensions_supported.extensions[idx])
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      enabled_extensions.extensions[idx] = true;
   }

   instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      instance->apiVersion = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      anv_EnumerateInstanceVersion(&instance->apiVersion);
   }

   instance->enabled_extensions = enabled_extensions;

   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_entrypoint_is_enabled(i, instance->apiVersion,
                                     &instance->enabled_extensions, NULL)) {
         instance->dispatch.entrypoints[i] = NULL;
      } else if (anv_dispatch_table.entrypoints[i] != NULL) {
         instance->dispatch.entrypoints[i] = anv_dispatch_table.entrypoints[i];
      } else {
         instance->dispatch.entrypoints[i] =
            anv_tramp_dispatch_table.entrypoints[i];
      }
   }
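   /* Summary (comment added for clarity): entrypoints for extensions that
    * were not enabled stay NULL, entrypoints present in anv_dispatch_table
    * are used directly, and the rest fall back to the corresponding
    * trampoline in anv_tramp_dispatch_table.
    */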

   instance->physicalDeviceCount = -1;

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(result);
   }

   instance->pipeline_cache_enabled =
      env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

void anv_DestroyInstance(
    VkInstance                                  _instance,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (!instance)
      return;

   if (instance->physicalDeviceCount > 0) {
      /* We support at most one physical device. */
      assert(instance->physicalDeviceCount == 1);
      anv_physical_device_finish(&instance->physicalDevice);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   _mesa_locale_fini();

   vk_free(&instance->alloc, instance);
}

static VkResult
anv_enumerate_devices(struct anv_instance *instance)
{
   /* TODO: Check for more devices ? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physicalDeviceCount = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
   if (max_devices < 1)
      return VK_ERROR_INCOMPATIBLE_DRIVER;

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PCI &&
          devices[i]->deviceinfo.pci->vendor_id == 0x8086) {

         result = anv_physical_device_init(&instance->physicalDevice,
                        instance,
                        devices[i]->nodes[DRM_NODE_PRIMARY],
                        devices[i]->nodes[DRM_NODE_RENDER]);
         if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   if (result == VK_SUCCESS)
      instance->physicalDeviceCount = 1;

   return result;
}

static VkResult
anv_instance_ensure_physical_device(struct anv_instance *instance)
{
   if (instance->physicalDeviceCount < 0) {
      VkResult result = anv_enumerate_devices(instance);
      if (result != VK_SUCCESS &&
          result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   return VK_SUCCESS;
}

VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result = anv_instance_ensure_physical_device(instance);
   if (result != VK_SUCCESS)
      return result;

   if (instance->physicalDeviceCount == 0)
      return VK_SUCCESS;

   assert(instance->physicalDeviceCount == 1);
   vk_outarray_append(&out, i) {
      *i = anv_physical_device_to_handle(&instance->physicalDevice);
   }

   return vk_outarray_status(&out);
}

VkResult anv_EnumeratePhysicalDeviceGroups(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceGroupCount,
    VkPhysicalDeviceGroupProperties*            pPhysicalDeviceGroupProperties)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                         pPhysicalDeviceGroupCount);

   VkResult result = anv_instance_ensure_physical_device(instance);
   if (result != VK_SUCCESS)
      return result;

   if (instance->physicalDeviceCount == 0)
      return VK_SUCCESS;

   assert(instance->physicalDeviceCount == 1);

   vk_outarray_append(&out, p) {
      p->physicalDeviceCount = 1;
      memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
      p->physicalDevices[0] =
         anv_physical_device_to_handle(&instance->physicalDevice);
      p->subsetAllocation = VK_FALSE;

      vk_foreach_struct(ext, p->pNext)
         anv_debug_ignored_stype(ext->sType);
   }

   return vk_outarray_status(&out);
}

void anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess                       = true,
      .fullDrawIndexUint32                      = true,
      .imageCubeArray                           = true,
      .independentBlend                         = true,
      .geometryShader                           = true,
      .tessellationShader                       = true,
      .sampleRateShading                        = true,
      .dualSrcBlend                             = true,
      .logicOp                                  = true,
      .multiDrawIndirect                        = true,
      .drawIndirectFirstInstance                = true,
      .depthClamp                               = true,
      .depthBiasClamp                           = true,
      .fillModeNonSolid                         = true,
      .depthBounds                              = false,
      .wideLines                                = true,
      .largePoints                              = true,
      .alphaToOne                               = true,
      .multiViewport                            = true,
      .samplerAnisotropy                        = true,
      .textureCompressionETC2                   = pdevice->info.gen >= 8 ||
                                                  pdevice->info.is_baytrail,
      .textureCompressionASTC_LDR               = pdevice->info.gen >= 9, /* FINISHME CHV */
      .textureCompressionBC                     = true,
      .occlusionQueryPrecise                    = true,
      .pipelineStatisticsQuery                  = true,
      .fragmentStoresAndAtomics                 = true,
      .shaderTessellationAndGeometryPointSize   = true,
      .shaderImageGatherExtended                = true,
      .shaderStorageImageExtendedFormats        = true,
      .shaderStorageImageMultisample            = false,
      .shaderStorageImageReadWithoutFormat      = false,
      .shaderStorageImageWriteWithoutFormat     = true,
      .shaderUniformBufferArrayDynamicIndexing  = true,
      .shaderSampledImageArrayDynamicIndexing   = true,
      .shaderStorageBufferArrayDynamicIndexing  = true,
      .shaderStorageImageArrayDynamicIndexing   = true,
      .shaderClipDistance                       = true,
      .shaderCullDistance                       = true,
      .shaderFloat64                            = pdevice->info.gen >= 8 &&
                                                  pdevice->info.has_64bit_types,
      .shaderInt64                              = pdevice->info.gen >= 8 &&
                                                  pdevice->info.has_64bit_types,
      .shaderInt16                              = pdevice->info.gen >= 8,
      .shaderResourceMinLod                     = false,
      .variableMultisampleRate                  = true,
      .inheritedQueries                         = true,
   };

   /* We can't do image stores in vec4 shaders */
   pFeatures->vertexPipelineStoresAndAtomics =
      pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
      pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
}

void anv_GetPhysicalDeviceFeatures2(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures2*                  pFeatures)
{
   anv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);

   vk_foreach_struct(ext, pFeatures->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features = (void *)ext;
         features->protectedMemory = VK_FALSE;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *)ext;
         features->multiview = true;
         features->multiviewGeometryShader = true;
         features->multiviewTessellationShader = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES: {
         VkPhysicalDeviceVariablePointerFeatures *features = (void *)ext;
         features->variablePointersStorageBuffer = true;
         features->variablePointers = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features = (void *)ext;
         features->shaderDrawParameters = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR: {
         VkPhysicalDevice16BitStorageFeaturesKHR *features =
            (VkPhysicalDevice16BitStorageFeaturesKHR *)ext;
         ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

         features->storageBuffer16BitAccess = pdevice->info.gen >= 8;
         features->uniformAndStorageBuffer16BitAccess = pdevice->info.gen >= 8;
         features->storagePushConstant16 = pdevice->info.gen >= 8;
         features->storageInputOutput16 = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: {
         VkPhysicalDevice8BitStorageFeaturesKHR *features =
            (VkPhysicalDevice8BitStorageFeaturesKHR *)ext;
         ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

         features->storageBuffer8BitAccess = pdevice->info.gen >= 8;
         features->uniformAndStorageBuffer8BitAccess = pdevice->info.gen >= 8;
         features->storagePushConstant8 = pdevice->info.gen >= 8;
         break;
      }

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

void anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties*                 pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct gen_device_info *devinfo = &pdevice->info;

   /* See assertions made when programming the buffer surface state. */
   const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
                                      (1ul << 30) : (1ul << 27);

   const uint32_t max_samplers = (devinfo->gen >= 8 || devinfo->is_haswell) ?
                                 128 : 16;

   VkSampleCountFlags sample_counts =
      isl_device_get_sample_counts(&pdevice->isl_dev);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D                      = (1 << 14),
      .maxImageDimension2D                      = (1 << 14),
      .maxImageDimension3D                      = (1 << 11),
      .maxImageDimensionCube                    = (1 << 14),
      .maxImageArrayLayers                      = (1 << 11),
      .maxTexelBufferElements                   = 128 * 1024 * 1024,
      .maxUniformBufferRange                    = (1ul << 27),
      .maxStorageBufferRange                    = max_raw_buffer_sz,
      .maxPushConstantsSize                     = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount                 = UINT32_MAX,
      .maxSamplerAllocationCount                = 64 * 1024,
      .bufferImageGranularity                   = 64, /* A cache line */
      .sparseAddressSpaceSize                   = 0,
      .maxBoundDescriptorSets                   = MAX_SETS,
      .maxPerStageDescriptorSamplers            = max_samplers,
      .maxPerStageDescriptorUniformBuffers      = 64,
      .maxPerStageDescriptorStorageBuffers      = 64,
      .maxPerStageDescriptorSampledImages       = max_samplers,
      .maxPerStageDescriptorStorageImages       = 64,
      .maxPerStageDescriptorInputAttachments    = 64,
      .maxPerStageResources                     = 250,
      .maxDescriptorSetSamplers                 = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
      .maxDescriptorSetUniformBuffers           = 6 * 64,           /* number of stages * maxPerStageDescriptorUniformBuffers */
      .maxDescriptorSetUniformBuffersDynamic    = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetStorageBuffers           = 6 * 64,           /* number of stages * maxPerStageDescriptorStorageBuffers */
      .maxDescriptorSetStorageBuffersDynamic    = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetSampledImages            = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSampledImages */
      .maxDescriptorSetStorageImages            = 6 * 64,           /* number of stages * maxPerStageDescriptorStorageImages */
      .maxDescriptorSetInputAttachments         = 256,
      .maxVertexInputAttributes                 = MAX_VBS,
      .maxVertexInputBindings                   = MAX_VBS,
      .maxVertexInputAttributeOffset            = 2047,
      .maxVertexInputBindingStride              = 2048,
      .maxVertexOutputComponents                = 128,
      .maxTessellationGenerationLevel           = 64,
      .maxTessellationPatchSize                 = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 128,
      .maxTessellationControlTotalOutputComponents = 2048,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations             = 32,
      .maxGeometryInputComponents               = 64,
      .maxGeometryOutputComponents              = 128,
      .maxGeometryOutputVertices                = 256,
      .maxGeometryTotalOutputComponents         = 1024,
      .maxFragmentInputComponents               = 112, /* 128 components - (POS, PSIZ, CLIP_DIST0, CLIP_DIST1) */
      .maxFragmentOutputAttachments             = 8,
      .maxFragmentDualSrcAttachments            = 1,
      .maxFragmentCombinedOutputResources       = 8,
      .maxComputeSharedMemorySize               = 32768,
      .maxComputeWorkGroupCount                 = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations           = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits                    = 4 /* FIXME */,
      .subTexelPrecisionBits                    = 4 /* FIXME */,
      .mipmapPrecisionBits                      = 4 /* FIXME */,
      .maxDrawIndexedIndexValue                 = UINT32_MAX,
      .maxDrawIndirectCount                     = UINT32_MAX,
      .maxSamplerLodBias                        = 16,
      .maxSamplerAnisotropy                     = 16,
      .maxViewports                             = MAX_VIEWPORTS,
      .maxViewportDimensions                    = { (1 << 14), (1 << 14) },
      .viewportBoundsRange                      = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits                     = 13, /* We take a float? */
      .minMemoryMapAlignment                    = 4096, /* A page */
      .minTexelBufferOffsetAlignment            = 1,
      /* We need 16 for UBO block reads to work and 32 for push UBOs */
      .minUniformBufferOffsetAlignment          = 32,
      .minStorageBufferOffsetAlignment          = 4,
      .minTexelOffset                           = -8,
      .maxTexelOffset                           = 7,
      .minTexelGatherOffset                     = -32,
      .maxTexelGatherOffset                     = 31,
      .minInterpolationOffset                   = -0.5,
      .maxInterpolationOffset                   = 0.4375,
      .subPixelInterpolationOffsetBits          = 4,
      .maxFramebufferWidth                      = (1 << 14),
      .maxFramebufferHeight                     = (1 << 14),
      .maxFramebufferLayers                     = (1 << 11),
      .framebufferColorSampleCounts             = sample_counts,
      .framebufferDepthSampleCounts             = sample_counts,
      .framebufferStencilSampleCounts           = sample_counts,
      .framebufferNoAttachmentsSampleCounts     = sample_counts,
      .maxColorAttachments                      = MAX_RTS,
      .sampledImageColorSampleCounts            = sample_counts,
      .sampledImageIntegerSampleCounts          = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts            = sample_counts,
      .sampledImageStencilSampleCounts          = sample_counts,
      .storageImageSampleCounts                 = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords                       = 1,
      .timestampComputeAndGraphics              = false,
      .timestampPeriod                          = 1000000000.0 / devinfo->timestamp_frequency,
      .maxClipDistances                         = 8,
      .maxCullDistances                         = 8,
      .maxCombinedClipAndCullDistances          = 8,
      .discreteQueuePriorities                  = 1,
      .pointSizeRange                           = { 0.125, 255.875 },
      .lineWidthRange                           = { 0.0, 7.9921875 },
      .pointSizeGranularity                     = (1.0 / 8.0),
      .lineWidthGranularity                     = (1.0 / 128.0),
      .strictLines                              = false, /* FINISHME */
      .standardSampleLocations                  = true,
      .optimalBufferCopyOffsetAlignment         = 128,
      .optimalBufferCopyRowPitchAlignment       = 128,
      .nonCoherentAtomSize                      = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = anv_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0x8086,
      .deviceID = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
   };

   snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
            "%s", pdevice->name);
   memcpy(pProperties->pipelineCacheUUID,
          pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
}

void anv_GetPhysicalDeviceProperties2(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties2*                pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   anv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext) {
      switch (ext->sType) {
      case VK_STRU