/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "vk_util.h"

#include "anv_private.h"

/*
 * Descriptor set layouts.
 */

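/* Map a Vulkan descriptor type to the kinds of data (surface states,
 * sampler states, buffer views, inline uniform bytes, ...) the driver has
 * to store for each element of a binding of that type.
 */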
static enum anv_descriptor_data
anv_descriptor_data_for_type(const struct anv_physical_device *device,
                             VkDescriptorType type)
{
   enum anv_descriptor_data data = 0;

   switch (type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      data = ANV_DESCRIPTOR_SAMPLER_STATE;
      break;

   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      data = ANV_DESCRIPTOR_SURFACE_STATE |
             ANV_DESCRIPTOR_SAMPLER_STATE;
      break;

   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      data = ANV_DESCRIPTOR_SURFACE_STATE;
      break;

   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      data = ANV_DESCRIPTOR_SURFACE_STATE;
      if (device->info.gen < 9)
         data |= ANV_DESCRIPTOR_IMAGE_PARAM;
      break;

   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      data = ANV_DESCRIPTOR_SURFACE_STATE |
             ANV_DESCRIPTOR_BUFFER_VIEW;
      break;

   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      data = ANV_DESCRIPTOR_SURFACE_STATE;
      break;

   case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
      data = ANV_DESCRIPTOR_INLINE_UNIFORM;
      break;

   default:
      unreachable("Unsupported descriptor type");
   }

   return data;
}

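/* Size, in bytes, that one descriptor occupies in the descriptor buffer.
 * At this point only inline uniform blocks actually store their payload in
 * the descriptor buffer (see anv_descriptor_size() below); every other
 * descriptor type lives entirely in surface states, sampler states, and
 * buffer views, so its descriptor-buffer footprint is zero.
 */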
static unsigned
anv_descriptor_data_size(enum anv_descriptor_data data)
{
   return 0;
}

/** Returns the size in bytes of each descriptor with the given layout */
unsigned
anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout)
{
   if (layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
      assert(layout->data == ANV_DESCRIPTOR_INLINE_UNIFORM);
      return layout->array_size;
   }

   return anv_descriptor_data_size(layout->data);
}

/** Returns the size in bytes of each descriptor of the given type
 *
 * This version of the function does not have access to the entire layout so
 * it may only work on certain descriptor types where the descriptor size is
 * entirely determined by the descriptor type.  Whenever possible, code should
 * use anv_descriptor_size() instead.
 */
unsigned
anv_descriptor_type_size(const struct anv_physical_device *pdevice,
                         VkDescriptorType type)
{
   assert(type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
   return anv_descriptor_data_size(anv_descriptor_data_for_type(pdevice, type));
}

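/* Check whether a set layout fits in our binding tables by counting, per
 * shader stage, the surfaces each binding would consume.  Multi-planar
 * (YCbCr) immutable samplers need one surface per plane, which is why
 * combined image/samplers are special-cased below.
 */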
void anv_GetDescriptorSetLayoutSupport(
    VkDevice                                    device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    VkDescriptorSetLayoutSupport*               pSupport)
{
   uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };

   for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b];

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* There is no real limit on samplers */
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         if (binding->pImmutableSamplers) {
            for (uint32_t i = 0; i < binding->descriptorCount; i++) {
               ANV_FROM_HANDLE(anv_sampler, sampler,
                               binding->pImmutableSamplers[i]);
               anv_foreach_stage(s, binding->stageFlags)
                  surface_count[s] += sampler->n_planes;
            }
         } else {
            anv_foreach_stage(s, binding->stageFlags)
               surface_count[s] += binding->descriptorCount;
         }
         break;

      default:
         anv_foreach_stage(s, binding->stageFlags)
            surface_count[s] += binding->descriptorCount;
         break;
      }
   }

   bool supported = true;
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      /* Our maximum binding table size is 250 and we need to reserve 8 for
       * render targets.  240 is a nice round number.
       */
      if (surface_count[s] >= 240)
         supported = false;
   }

   pSupport->supported = supported;
}

VkResult anv_CreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorSetLayout*                      pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t max_binding = 0;
   uint32_t immutable_sampler_count = 0;
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);

      /* From the Vulkan 1.1.97 spec for VkDescriptorSetLayoutBinding:
       *
       *    "If descriptorType specifies a VK_DESCRIPTOR_TYPE_SAMPLER or
       *    VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER type descriptor, then
       *    pImmutableSamplers can be used to initialize a set of immutable
       *    samplers. [...]  If descriptorType is not one of these descriptor
       *    types, then pImmutableSamplers is ignored.
       *
       * We need to be careful here and only parse pImmutableSamplers if we
       * have one of the right descriptor types.
       */
      VkDescriptorType desc_type = pCreateInfo->pBindings[j].descriptorType;
      if ((desc_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
           desc_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
          pCreateInfo->pBindings[j].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
   }

   struct anv_descriptor_set_layout *set_layout;
   struct anv_descriptor_set_binding_layout *bindings;
   struct anv_sampler **samplers;

   /* We need to allocate descriptor set layouts off the device allocator
    * with DEVICE scope because they are reference counted and may not be
    * destroyed when vkDestroyDescriptorSetLayout is called.
    */
   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &set_layout, 1);
   anv_multialloc_add(&ma, &bindings, max_binding + 1);
   anv_multialloc_add(&ma, &samplers, immutable_sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   memset(set_layout, 0, sizeof(*set_layout));
   set_layout->ref_cnt = 1;
   set_layout->binding_count = max_binding + 1;

   for (uint32_t b = 0; b <= max_binding; b++) {
      /* Initialize all binding_layout entries to -1 */
      memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b]));

      set_layout->binding[b].data = 0;
      set_layout->binding[b].array_size = 0;
      set_layout->binding[b].immutable_samplers = NULL;
   }

   /* Initialize all samplers to 0 */
   memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));

   uint32_t buffer_view_count = 0;
   uint32_t dynamic_offset_count = 0;
   uint32_t descriptor_buffer_size = 0;

   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
      uint32_t b = binding->binding;
      /* We temporarily store the pointer to the binding in the
       * immutable_samplers pointer.  This provides us with a quick-and-dirty
       * way to sort the bindings by binding number.
       */
      set_layout->binding[b].immutable_samplers = (void *)binding;
   }

   for (uint32_t b = 0; b <= max_binding; b++) {
      const VkDescriptorSetLayoutBinding *binding =
         (void *)set_layout->binding[b].immutable_samplers;

      if (binding == NULL)
         continue;

      /* We temporarily stashed the pointer to the binding in the
       * immutable_samplers pointer.  Now that we've pulled it back out
       * again, we reset immutable_samplers to NULL.
       */
      set_layout->binding[b].immutable_samplers = NULL;

      if (binding->descriptorCount == 0)
         continue;

#ifndef NDEBUG
      set_layout->binding[b].type = binding->descriptorType;
#endif
      set_layout->binding[b].data =
         anv_descriptor_data_for_type(&device->instance->physicalDevice,
                                      binding->descriptorType);
      set_layout->binding[b].array_size = binding->descriptorCount;
      set_layout->binding[b].descriptor_index = set_layout->size;
      set_layout->size += binding->descriptorCount;

      if (set_layout->binding[b].data & ANV_DESCRIPTOR_BUFFER_VIEW) {
         set_layout->binding[b].buffer_view_index = buffer_view_count;
         buffer_view_count += binding->descriptorCount;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         if (binding->pImmutableSamplers) {
            set_layout->binding[b].immutable_samplers = samplers;
            samplers += binding->descriptorCount;

            for (uint32_t i = 0; i < binding->descriptorCount; i++)
               set_layout->binding[b].immutable_samplers[i] =
                  anv_sampler_from_handle(binding->pImmutableSamplers[i]);
         }
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
         dynamic_offset_count += binding->descriptorCount;
         break;

      default:
         break;
      }

      if (binding->descriptorType ==
          VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
         /* Inline uniform blocks are specified to use the descriptor array
          * size as the size in bytes of the block.
          */
         descriptor_buffer_size = align_u32(descriptor_buffer_size, 32);
         set_layout->binding[b].descriptor_offset = descriptor_buffer_size;
         descriptor_buffer_size += binding->descriptorCount;
      } else {
         set_layout->binding[b].descriptor_offset = descriptor_buffer_size;
         descriptor_buffer_size += anv_descriptor_size(&set_layout->binding[b]) *
                                   binding->descriptorCount;
      }

      set_layout->shader_stages |= binding->stageFlags;
   }

   set_layout->buffer_view_count = buffer_view_count;
   set_layout->dynamic_offset_count = dynamic_offset_count;
   set_layout->descriptor_buffer_size = descriptor_buffer_size;

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorSetLayout(
    VkDevice                                    _device,
    VkDescriptorSetLayout                       _set_layout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   if (!set_layout)
      return;

   anv_descriptor_set_layout_unref(device, set_layout);
}

#define SHA1_UPDATE_VALUE(ctx, x) _mesa_sha1_update(ctx, &(x), sizeof(x));

static void
sha1_update_immutable_sampler(struct mesa_sha1 *ctx,
                              const struct anv_sampler *sampler)
{
   if (!sampler->conversion)
      return;

   /* The only thing that affects the shader is ycbcr conversion */
   _mesa_sha1_update(ctx, sampler->conversion,
                     sizeof(*sampler->conversion));
}

static void
sha1_update_descriptor_set_binding_layout(struct mesa_sha1 *ctx,
   const struct anv_descriptor_set_binding_layout *layout)
{
   SHA1_UPDATE_VALUE(ctx, layout->data);
   SHA1_UPDATE_VALUE(ctx, layout->array_size);
   SHA1_UPDATE_VALUE(ctx, layout->descriptor_index);
   SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_index);
   SHA1_UPDATE_VALUE(ctx, layout->buffer_view_index);
   SHA1_UPDATE_VALUE(ctx, layout->descriptor_offset);

   if (layout->immutable_samplers) {
      for (uint16_t i = 0; i < layout->array_size; i++)
         sha1_update_immutable_sampler(ctx, layout->immutable_samplers[i]);
   }
}

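/* Hash the parts of a set layout that can affect compiled shaders.  The
 * pipeline layout SHA-1 built from these in anv_CreatePipelineLayout below
 * is used, among other things, when hashing pipelines for the pipeline
 * cache.
 */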
static void
sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
                                  const struct anv_descriptor_set_layout *layout)
{
   SHA1_UPDATE_VALUE(ctx, layout->binding_count);
   SHA1_UPDATE_VALUE(ctx, layout->size);
   SHA1_UPDATE_VALUE(ctx, layout->shader_stages);
   SHA1_UPDATE_VALUE(ctx, layout->buffer_view_count);
   SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_count);
   SHA1_UPDATE_VALUE(ctx, layout->descriptor_buffer_size);

   for (uint16_t i = 0; i < layout->binding_count; i++)
      sha1_update_descriptor_set_binding_layout(ctx, &layout->binding[i]);
}

/*
 * Pipeline layouts.  These have nothing to do with the pipeline.  They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult anv_CreatePipelineLayout(
    VkDevice                                    _device,
    const VkPipelineLayoutCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineLayout*                           pPipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   layout->num_sets = pCreateInfo->setLayoutCount;

   unsigned dynamic_offset_count = 0;

   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[set]);
      layout->set[set].layout = set_layout;
      anv_descriptor_set_layout_ref(set_layout);

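      /* Dynamic offsets are flattened into a single array that spans all
       * sets in set order; record where this set's dynamic buffers start.
       */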
      layout->set[set].dynamic_offset_start = dynamic_offset_count;
      for (uint32_t b = 0; b < set_layout->binding_count; b++) {
         if (set_layout->binding[b].dynamic_offset_index < 0)
            continue;

         dynamic_offset_count += set_layout->binding[b].array_size;
      }
   }

   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);
   for (unsigned s = 0; s < layout->num_sets; s++) {
      sha1_update_descriptor_set_layout(&ctx, layout->set[s].layout);
      _mesa_sha1_update(&ctx, &layout->set[s].dynamic_offset_start,
                        sizeof(layout->set[s].dynamic_offset_start));
   }
   _mesa_sha1_update(&ctx, &layout->num_sets, sizeof(layout->num_sets));
   _mesa_sha1_final(&ctx, layout->sha1);

   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}

void anv_DestroyPipelineLayout(
    VkDevice                                    _device,
    VkPipelineLayout                            _pipelineLayout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);

   if (!pipeline_layout)
      return;

   for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
      anv_descriptor_set_layout_unref(device, pipeline_layout->set[i].layout);

   vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

/*
 * Descriptor pools.
 *
 * These are implemented using a big pool of memory and a free-list for the
 * host memory allocations and a state_stream and a free list for the buffer
 * view surface state. The spec allows us to fail to allocate due to
 * fragmentation in all cases but two: 1) after pool reset, allocating up
 * until the pool size with no freeing must succeed and 2) allocating and
 * freeing only descriptor sets with the same layout. Case 1) is easy
 * enough, and the free lists let us recycle blocks for case 2).
 */

/* The vma heap reserves 0 to mean NULL; we have to offset by some amount to
 * ensure we can allocate the entire BO without hitting zero.  The actual
 * amount doesn't matter.
 */
#define POOL_HEAP_OFFSET 64

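/* End-of-list sentinel for the host-memory free list.  The list stores
 * byte offsets into pool->data and offset 0 is a valid allocation, so we
 * use 1 (which can never be the start of a set) to mean "empty".
 */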
#define EMPTY 1

VkResult anv_CreateDescriptorPool(
    VkDevice                                    _device,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorPool*                           pDescriptorPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_pool *pool;

   const VkDescriptorPoolInlineUniformBlockCreateInfoEXT *inline_info =
      vk_find_struct_const(pCreateInfo->pNext,
                           DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT);

   uint32_t descriptor_count = 0;
   uint32_t buffer_view_count = 0;
   uint32_t descriptor_bo_size = 0;
   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
      enum anv_descriptor_data desc_data =
         anv_descriptor_data_for_type(&device->instance->physicalDevice,
                                      pCreateInfo->pPoolSizes[i].type);

      if (desc_data & ANV_DESCRIPTOR_BUFFER_VIEW)
         buffer_view_count += pCreateInfo->pPoolSizes[i].descriptorCount;

      unsigned desc_data_size = anv_descriptor_data_size(desc_data) *
                                pCreateInfo->pPoolSizes[i].descriptorCount;

      if (pCreateInfo->pPoolSizes[i].type ==
          VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
         /* Inline uniform blocks are specified to use the descriptor array
          * size as the size in bytes of the block.
          */
         assert(inline_info);
         desc_data_size += pCreateInfo->pPoolSizes[i].descriptorCount;
      }

      descriptor_bo_size += desc_data_size;

      descriptor_count += pCreateInfo->pPoolSizes[i].descriptorCount;
   }
   /* We have to align descriptor buffer allocations to 32B so that we can
    * push descriptor buffers.  This means that each descriptor buffer
    * allocated may burn up to 32B of extra space to get the right alignment.
    * (Technically, it's at most 28B because we're always going to start at
    * least 4B aligned but we're being conservative here.)  Allocate enough
    * extra space that we can chop it into maxSets pieces and align each one
    * of them to 32B.
    */
   descriptor_bo_size += 32 * pCreateInfo->maxSets;
   descriptor_bo_size = ALIGN(descriptor_bo_size, 4096);
   /* We align inline uniform blocks to 32B */
   if (inline_info)
      descriptor_bo_size += 32 * inline_info->maxInlineUniformBlockBindings;

   const size_t pool_size =
      pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
      descriptor_count * sizeof(struct anv_descriptor) +
      buffer_view_count * sizeof(struct anv_buffer_view);
   const size_t total_size = sizeof(*pool) + pool_size;

   pool = vk_alloc2(&device->alloc, pAllocator, total_size, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->size = pool_size;
   pool->next = 0;
   pool->free_list = EMPTY;

   if (descriptor_bo_size > 0) {
      VkResult result = anv_bo_init_new(&pool->bo, device, descriptor_bo_size);
      if (result != VK_SUCCESS) {
         vk_free2(&device->alloc, pAllocator, pool);
         return result;
      }

      anv_gem_set_caching(device, pool->bo.gem_handle, I915_CACHING_CACHED);

      pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0,
                                  descriptor_bo_size, 0);
      if (pool->bo.map == NULL) {
         anv_gem_close(device, pool->bo.gem_handle);
         vk_free2(&device->alloc, pAllocator, pool);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      if (device->instance->physicalDevice.use_softpin) {
         pool->bo.flags |= EXEC_OBJECT_PINNED;
         anv_vma_alloc(device, &pool->bo);
      }

      util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, descriptor_bo_size);
   } else {
      pool->bo.size = 0;
   }

   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_pool, 4096);
   pool->surface_state_free_list = NULL;

   list_inithead(&pool->desc_sets);

   *pDescriptorPool = anv_descriptor_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            _pool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, _pool);

   if (!pool)
      return;

   if (pool->bo.size) {
      anv_gem_munmap(pool->bo.map, pool->bo.size);
      anv_vma_free(device, &pool->bo);
      anv_gem_close(device, pool->bo.gem_handle);
   }
   anv_state_stream_finish(&pool->surface_state_stream);

   list_for_each_entry_safe(struct anv_descriptor_set, set,
                            &pool->desc_sets, pool_link) {
      anv_descriptor_set_destroy(device, pool, set);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorPoolResetFlags                  flags)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   pool->next = 0;
   pool->free_list = EMPTY;

   if (pool->bo.size) {
      util_vma_heap_finish(&pool->bo_heap);
      util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, pool->bo.size);
   }

   anv_state_stream_finish(&pool->surface_state_stream);
   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_pool, 4096);
   pool->surface_state_free_list = NULL;

   return VK_SUCCESS;
}

struct pool_free_list_entry {
   uint32_t next;
   uint32_t size;
};

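/* Allocate the host memory for a descriptor set: bump-allocate from the
 * end of the pool if there is room, otherwise do a first-fit walk of the
 * free list.  If the free list is non-empty but nothing on it is large
 * enough, the pool is fragmented; otherwise it is simply out of memory.
 */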
static VkResult
anv_descriptor_pool_alloc_set(struct anv_descriptor_pool *pool,
                              uint32_t size,
                              struct anv_descriptor_set **set)
{
   if (size <= pool->size - pool->next) {
      *set = (struct anv_descriptor_set *) (pool->data + pool->next);
      pool->next += size;
      return VK_SUCCESS;
   } else {
      struct pool_free_list_entry *entry;
      uint32_t *link = &pool->free_list;
      for (uint32_t f = pool->free_list; f != EMPTY; f = entry->next) {
         entry = (struct pool_free_list_entry *) (pool->data + f);
         if (size <= entry->size) {
            *link = entry->next;
            *set = (struct anv_descriptor_set *) entry;
            return VK_SUCCESS;
         }
         link = &entry->next;
      }

      if (pool->free_list != EMPTY) {
         return vk_error(VK_ERROR_FRAGMENTED_POOL);
      } else {
         return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY);
      }
   }
}

static void
anv_descriptor_pool_free_set(struct anv_descriptor_pool *pool,
                             struct anv_descriptor_set *set)
{
   /* Put the descriptor set allocation back on the free list. */
   const uint32_t index = (char *) set - pool->data;
   if (index + set->size == pool->next) {
      pool->next = index;
   } else {
      struct pool_free_list_entry *entry = (struct pool_free_list_entry *) set;
      entry->next = pool->free_list;
      entry->size = set->size;
      pool->free_list = (char *) entry - pool->data;
   }

   list_del(&set->pool_link);
}

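/* Freed buffer view surface states are recycled through an intrusive free
 * list: the 64-byte mapping of the state itself is reused to hold the list
 * link and the anv_state handle.
 */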
struct surface_state_free_list_entry {
   void *next;
   struct anv_state state;
};

static struct anv_state
anv_descriptor_pool_alloc_state(struct anv_descriptor_pool *pool)
{
   struct surface_state_free_list_entry *entry =
      pool->surface_state_free_list;

   if (entry) {
      struct anv_state state = entry->state;
      pool->surface_state_free_list = entry->next;
      assert(state.alloc_size == 64);
      return state;
   } else {
      return anv_state_stream_alloc(&pool->surface_state_stream, 64, 64);
   }
}

static void
anv_descriptor_pool_free_state(struct anv_descriptor_pool *pool,
                               struct anv_state state)
{
   /* Put the buffer view surface state back on the free list. */
   struct surface_state_free_list_entry *entry = state.map;
   entry->next = pool->surface_state_free_list;
   entry->state = state;
   pool->surface_state_free_list = entry;
}

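/* A descriptor set is a single contiguous allocation: the
 * anv_descriptor_set struct, then layout->size descriptors, then the
 * buffer views (see the set->buffer_views setup in
 * anv_descriptor_set_create).
 */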
size_t
anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout)
{
   return
      sizeof(struct anv_descriptor_set) +
      layout->size * sizeof(struct anv_descriptor) +
      layout->buffer_view_count * sizeof(struct anv_buffer_view);
}

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   const size_t size = anv_descriptor_set_layout_size(layout);

   VkResult result = anv_descriptor_pool_alloc_set(pool, size, &set);
   if (result != VK_SUCCESS)
      return result;

   if (layout->descriptor_buffer_size) {
      /* Align the size to 32 so that alignment gaps don't cause extra holes
       * in the heap which can lead to bad performance.
       */
      uint64_t pool_vma_offset =
         util_vma_heap_alloc(&pool->bo_heap,
                             ALIGN(layout->descriptor_buffer_size, 32), 32);
      if (pool_vma_offset == 0) {
         anv_descriptor_pool_free_set(pool, set);
         return vk_error(VK_ERROR_FRAGMENTED_POOL);
      }
      assert(pool_vma_offset >= POOL_HEAP_OFFSET &&
             pool_vma_offset - POOL_HEAP_OFFSET <= INT32_MAX);
      set->desc_mem.offset = pool_vma_offset - POOL_HEAP_OFFSET;
      set->desc_mem.alloc_size = layout->descriptor_buffer_size;
      set->desc_mem.map = pool->bo.map + set->desc_mem.offset;

      set->desc_surface_state = anv_descriptor_pool_alloc_state(pool);
      anv_fill_buffer_surface_state(device, set->desc_surface_state,
                                    ISL_FORMAT_R32G32B32A32_FLOAT,
                                    (struct anv_address) {
                                       .bo = &pool->bo,
                                       .offset = set->desc_mem.offset,
                                    },
                                    layout->descriptor_buffer_size, 1);
   } else {
      set->desc_mem = ANV_STATE_NULL;
      set->desc_surface_state = ANV_STATE_NULL;
   }

   set->pool = pool;
   set->layout = layout;
   anv_descriptor_set_layout_ref(layout);

   set->size = size;
   set->buffer_views =
      (struct anv_buffer_view *) &set->descriptors[layout->size];
   set->buffer_view_count = layout->buffer_view_count;

   /* By defining the descriptors to be zero now, we can later verify that
    * a descriptor has not been populated with user data.
    */
   memset(set->descriptors, 0, sizeof(struct anv_descriptor) * layout->size);

   /* Go through and fill out immutable samplers if we have any */
   struct anv_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
            /* The type will get changed to COMBINED_IMAGE_SAMPLER in
             * UpdateDescriptorSets if needed.  However, if the descriptor
             * set has an immutable sampler, UpdateDescriptorSets may never
             * touch it, so we need to make sure it's 100% valid now.
             */
            desc[i] = (struct anv_descriptor) {
               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = layout->binding[b].immutable_samplers[i],
            };
         }
      }
      desc += layout->binding[b].array_size;
   }

   /* Allocate surface state for the buffer views. */
   for (uint32_t b = 0; b < layout->buffer_view_count; b++) {
      set->buffer_views[b].surface_state =
         anv_descriptor_pool_alloc_state(pool);
   }

   *out_set = set;

   return VK_SUCCESS;
}

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set)
{
   anv_descriptor_set_layout_unref(device, set->layout);

   if (set->desc_mem.alloc_size) {
      util_vma_heap_free(&pool->bo_heap,
                         (uint64_t)set->desc_mem.offset + POOL_HEAP_OFFSET,
                         set->desc_mem.alloc_size);
      anv_descriptor_pool_free_state(pool, set->desc_surface_state);
   }

   for (uint32_t b = 0; b < set->buffer_view_count; b++)
      anv_descriptor_pool_free_state(pool, set->buffer_views[b].surface_state);

   anv_descriptor_pool_free_set(pool, set);
}

VkResult anv_AllocateDescriptorSets(
    VkDevice                                    _device,
    const VkDescriptorSetAllocateInfo*          pAllocateInfo,
    VkDescriptorSet*                            pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

   VkResult result = VK_SUCCESS;
   struct anv_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout,
                      pAllocateInfo->pSetLayouts[i]);

      result = anv_descriptor_set_create(device, pool, layout, &set);
      if (result != VK_SUCCESS)
         break;

      list_addtail(&set->pool_link, &pool->desc_sets);

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS)
      anv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                             i, pDescriptorSets);

   return result;
}

VkResult anv_FreeDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      if (!set)
         continue;

      anv_descriptor_set_destroy(device, pool, set);
   }

   return VK_SUCCESS;
}

void
anv_descriptor_set_write_image_view(struct anv_device *device,
                                    struct anv_descriptor_set *set,
                                    const VkDescriptorImageInfo * const info,
                                    VkDescriptorType type,
                                    uint32_t binding,
                                    uint32_t element)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];
   struct anv_descriptor *desc =
      &set->descriptors[bind_layout->descriptor_index + element];
   struct anv_image_view *image_view = NULL;
   struct anv_sampler *sampler = NULL;

   assert(type == bind_layout->type);

   switch (type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      sampler = anv_sampler_from_handle(info->sampler);
      break;

   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      image_view = anv_image_view_from_handle(info->imageView);
      sampler = anv_sampler_from_handle(info->sampler);
      break;

   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      image_view = anv_image_view_from_handle(info->imageView);
      break;

   default:
      unreachable("invalid descriptor type");
   }

   /* If this descriptor has an immutable sampler, we don't want to stomp on
    * it.
    */
   sampler = bind_layout->immutable_samplers ?
             bind_layout->immutable_samplers[element] :
             sampler;

   *desc = (struct anv_descriptor) {
      .type = type,
      .layout = info->imageLayout,
      .image_view = image_view,
      .sampler = sampler,
   };
}

void
anv_descriptor_set_write_buffer_view(struct anv_device *device,
                                     struct anv_descriptor_set *set,
                                     VkDescriptorType type,
                                     struct anv_buffer_view *buffer_view,
                                     uint32_t binding,
                                     uint32_t element)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];
   struct anv_descriptor *desc =
      &set->descriptors[bind_layout->descriptor_index + element];

   assert(type == bind_layout->type);

   *desc = (struct anv_descriptor) {
      .type = type,
      .buffer_view = buffer_view,
   };
}

void
anv_descriptor_set_write_buffer(struct anv_device *device,
                                struct anv_descriptor_set *set,
                                struct anv_state_stream *alloc_stream,
                                VkDescriptorType type,
                                struct anv_buffer *buffer,
                                uint32_t binding,
                                uint32_t element,
                                VkDeviceSize offset,
                                VkDeviceSize range)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];
   struct anv_descriptor *desc =
      &set->descriptors[bind_layout->descriptor_index + element];

   assert(type == bind_layout->type);

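   /* Dynamic buffers keep the raw buffer/offset/range in the descriptor;
    * their surface states get built when the set is bound and the dynamic
    * offsets are known.  Everything else gets a buffer view whose surface
    * state is filled out immediately.
    */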
   if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
       type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
      *desc = (struct anv_descriptor) {
         .type = type,
         .buffer = buffer,
         .offset = offset,
         .range = range,
      };
   } else {
      assert(bind_layout->data & ANV_DESCRIPTOR_BUFFER_VIEW);
      struct anv_buffer_view *bview =
         &set->buffer_views[bind_layout->buffer_view_index + element];

      bview->format = anv_isl_format_for_descriptor_type(type);
      bview->range = anv_buffer_get_range(buffer, offset, range);
      bview->address = anv_address_add(buffer->address, offset);

      /* If we're writing descriptors through a push command, we need to
       * allocate the surface state from the command buffer. Otherwise it will
       * be allocated by the descriptor pool when calling
       * vkAllocateDescriptorSets.
       */
      if (alloc_stream)
         bview->surface_state = anv_state_stream_alloc(alloc_stream, 64, 64);

      anv_fill_buffer_surface_state(device, bview->surface_state,
                                    bview->format,
                                    bview->address, bview->range, 1);

      *desc = (struct anv_descriptor) {
         .type = type,
         .buffer_view = bview,
      };
   }
}

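/* Write inline uniform data into the set's descriptor buffer memory.  Per
 * VK_EXT_inline_uniform_block, "offset" and "size" are in bytes, so this
 * is a plain memcpy.
 */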
void
anv_descriptor_set_write_inline_uniform_data(struct anv_device *device,
                                             struct anv_descriptor_set *set,
                                             uint32_t binding,
                                             const void *data,
                                             size_t offset,
                                             size_t size)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];

   assert(bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM);

   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset;

   memcpy(desc_map + offset, data, size);
}

void anv_UpdateDescriptorSets(
    VkDevice                                    _device,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites,
    uint32_t                                    descriptorCopyCount,
    const VkCopyDescriptorSet*                  pDescriptorCopies)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->dstSet);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
            assert(buffer);

            anv_descriptor_set_write_buffer(device, set,
                                            NULL,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: {
         const VkWriteDescriptorSetInlineUniformBlockEXT *inline_write =
            vk_find_struct_const(write->pNext,
                                 WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);
         assert(inline_write->dataSize == write->descriptorCount);
         anv_descriptor_set_write_inline_uniform_data(device, set,
                                                      write->dstBinding,
                                                      inline_write->pData,
                                                      write->dstArrayElement,
                                                      inline_write->dataSize);
         break;
      }

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < descriptorCopyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dst, copy->dstSet);

      const struct anv_descriptor_set_binding_layout *src_layout =
         &src->layout->binding[copy->srcBinding];
      struct anv_descriptor *src_desc =
         &src->descriptors[src_layout->descriptor_index];
      src_desc += copy->srcArrayElement;

      const struct anv_descriptor_set_binding_layout *dst_layout =
         &dst->layout->binding[copy->dstBinding];
      struct anv_descriptor *dst_desc =
         &dst->descriptors[dst_layout->descriptor_index];
      dst_desc += copy->dstArrayElement;

      for (uint32_t j = 0; j < copy->descriptorCount; j++)
         dst_desc[j] = src_desc[j];

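      /* For inline uniform blocks, array elements and descriptor counts
       * are byte offsets and byte sizes, so copy the raw byte range
       * between the two sets' descriptor buffers.
       */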
      if (src_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
         assert(src_layout->data == ANV_DESCRIPTOR_INLINE_UNIFORM);
         memcpy(dst->desc_mem.map + dst_layout->descriptor_offset +
                                    copy->dstArrayElement,
                src->desc_mem.map + src_layout->descriptor_offset +
                                    copy->srcArrayElement,
                copy->descriptorCount);
      } else {
         unsigned desc_size = anv_descriptor_size(src_layout);
         if (desc_size > 0) {
            assert(desc_size == anv_descriptor_size(dst_layout));
            memcpy(dst->desc_mem.map + dst_layout->descriptor_offset +
                                       copy->dstArrayElement * desc_size,
                   src->desc_mem.map + src_layout->descriptor_offset +
                                       copy->srcArrayElement * desc_size,
                   copy->descriptorCount * desc_size);
         }
      }
   }
}

/*
 * Descriptor update templates.
 */

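/* Replay a descriptor update template: each entry names a binding, a
 * starting array element, and an element count, plus the offset and stride
 * with which to walk the caller's data blob.  The writes below mirror the
 * ones vkUpdateDescriptorSets would produce.
 */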
void
anv_descriptor_set_write_template(struct anv_device *device,
                                  struct anv_descriptor_set *set,
                                  struct anv_state_stream *alloc_stream,
                                  const struct anv_descriptor_update_template *template,
                                  const void *data)
{
   for (uint32_t i = 0; i < template->entry_count; i++) {
      const struct anv_descriptor_template_entry *entry =
         &template->entries[i];

      switch (entry->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorImageInfo *info =
               data + entry->offset + j * entry->stride;
            anv_descriptor_set_write_image_view(device, set,
                                                info, entry->type,
                                                entry->binding,
                                                entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkBufferView *_bview =
               data + entry->offset + j * entry->stride;
            ANV_FROM_HANDLE(anv_buffer_view, bview, *_bview);

            anv_descriptor_set_write_buffer_view(device, set,
                                                 entry->type,
                                                 bview,
                                                 entry->binding,
                                                 entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorBufferInfo *info =
               data + entry->offset + j * entry->stride;
            ANV_FROM_HANDLE(anv_buffer, buffer, info->buffer);

            anv_descriptor_set_write_buffer(device, set,
                                            alloc_stream,
                                            entry->type,
                                            buffer,
                                            entry->binding,
                                            entry->array_element + j,
                                            info->offset, info->range);
         }
         break;

      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
         anv_descriptor_set_write_inline_uniform_data(device, set,
                                                      entry->binding,
                                                      data + entry->offset,
                                                      entry->array_element,
                                                      entry->array_count);
         break;

      default:
         break;
      }
   }
}

VkResult anv_CreateDescriptorUpdateTemplate(
    VkDevice                                    _device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorUpdateTemplate*                 pDescriptorUpdateTemplate)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_update_template *template;

   size_t size = sizeof(*template) +
      pCreateInfo->descriptorUpdateEntryCount * sizeof(template->entries[0]);
   template = vk_alloc2(&device->alloc, pAllocator, size, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (template == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   template->bind_point = pCreateInfo->pipelineBindPoint;

   if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET)
      template->set = pCreateInfo->set;

   template->entry_count = pCreateInfo->descriptorUpdateEntryCount;
   for (uint32_t i = 0; i < template->entry_count; i++) {
      const VkDescriptorUpdateTemplateEntry *pEntry =
         &pCreateInfo->pDescriptorUpdateEntries[i];

      template->entries[i] = (struct anv_descriptor_template_entry) {
         .type = pEntry->descriptorType,
         .binding = pEntry->dstBinding,
         .array_element = pEntry->dstArrayElement,
         .array_count = pEntry->descriptorCount,
         .offset = pEntry->offset,
         .stride = pEntry->stride,
      };
   }

   *pDescriptorUpdateTemplate =
      anv_descriptor_update_template_to_handle(template);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorUpdateTemplate(
    VkDevice                                    _device,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);

   vk_free2(&device->alloc, pAllocator, template);
}

void anv_UpdateDescriptorSetWithTemplate(