The name `align` is very widely used as a variable; I suggest renaming the `align` function to `align_pot`.
Rationale:
nvk: Should use alignment instead of align !25997 (25f90c78)
This is an issue because the `align` function name is frequently shadowed by local variables named `align`.
treewide: Avoid use align as variable, replace it with other names !25997 (cd9f0dba)
Searching the tree for `align =` yields many matches:
150 results across 73 files
src\amd\addrlib\src\gfx9\gfx9addrlib.cpp:
212
213: align = Max(align, metaBlkSize);
214
216 {
217: align = Max(align, GetBlockSize(pIn->swizzleMode));
218 }
src\amd\common\ac_rtld.c:
190 /* old-style LDS symbols from initial prototype -- remove eventually */
191: s.align = MIN2(1u << (symbol->st_other >> 3), 1u << 16);
192 } else if (symbol->st_shndx == SHN_AMDGPU_LDS) {
193: s.align = MIN2(symbol->st_value, 1u << 16);
194 report_if(!util_is_power_of_two_nonzero(s.align));
388 lds_end->size = 0;
389: lds_end->align = lds_end_align;
390 lds_end->offset = binary->lds_size;
src\amd\compiler\aco_instruction_selection.cpp:
4055 Operand aligned_offset = offset;
4056: unsigned align = align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
4057 if (need_to_align_offset) {
4058: align = 4;
4059 Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
6705 */
6706: unsigned align = nir_intrinsic_align(instr);
6707 bool byte_align_for_smem_mubuf =
7219 unsigned num_components = instr->def.num_components;
7220: unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
7221 load_lds(ctx, elem_size_bytes, num_components, dst, address, nir_intrinsic_base(instr), align);
7231
7232: unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
7233 store_lds(ctx, elem_size_bytes, data, writemask, address, nir_intrinsic_base(instr), align);
11272 continue;
11273: unsigned align = MIN2(4, util_next_power_of_two(ctx->args->args[i].size));
11274 if (ctx->args->args[i].file == AC_ARG_SGPR && ctx->args->args[i].offset % align)
src\amd\vulkan\radv_descriptor_set.c:
100 uint32_t size = 0;
101: uint32_t align = radv_descriptor_alignment(list->pDescriptorTypes[i]);
102
src\amd\vulkan\radv_pipeline.c:
346 if (align_offset)
347: align = 1 << (ffs(align_offset) - 1);
348 else
349: align = align_mul;
350
src\amd\vulkan\radv_sdma.c:
361 unsigned max_size_per_packet = gfx_level >= GFX10_3 ? GFX103_SDMA_COPY_MAX_SIZE : CIK_SDMA_COPY_MAX_SIZE;
362: unsigned align = ~0u;
363 unsigned ncopy = DIV_ROUND_UP(size, max_size_per_packet);
374 if ((src_va & 0x3) == 0 && (dst_va & 0x3) == 0 && size > 4 && (size & 0x3) != 0) {
375: align = ~0x3u;
376 ncopy++;
src\amd\vulkan\radv_shader.c:
285 unsigned length = glsl_get_vector_elements(type);
286: *size = comp_size * length, *align = comp_size;
287 }
1498 sym->size = binary->info.ngg_info.esgs_ring_size;
1499: sym->align = 64 * 1024;
1500 }
1505 sym->size = binary->info.ngg_info.ngg_emit_size * 4;
1506: sym->align = 4;
1507
1510 sym->size = 8;
1511: sym->align = 4;
1512 }
src\asahi\compiler\agx_compile.c:
2565 {
2566: align = nir_combined_align(align, align_offset);
2567
2569
2570: if ((bytes & 1) || (align == 1))
2571 bit_size = 8;
2572: else if ((bytes & 2) || (align == 2))
2573 bit_size = 16;
2579 .bit_size = bit_size,
2580: .align = bit_size / 8,
2581 };
src\asahi\compiler\agx_nir_opt_preamble.c:
15 *size = (bit_size * def->num_components) / 16;
16: *align = bit_size / 16;
17 }
src\asahi\compiler\agx_register_allocate.c:
420 assert(align <= count && "still aligned");
421: align = count;
422
452 uint32_t nr = rctx->ncomps[ssa];
453: unsigned align = agx_size_align_16(rctx->sizes[ssa]);
454
811
812: unsigned align = count;
813
src\asahi\lib\gen_pack.py:
240 if ret[0] == 'align':
241: align = ret[1]
242 # Make sure the alignment is a power of 2
619 self.group.length = int(attrs["size"])
620: self.group.align = int(attrs["align"]) if "align" in attrs else None
621 self.structs[attrs["name"]] = self.group
src\asahi\lib\pool.h:
85 { \
86: .size = MALI_##name##_LENGTH, .align = MALI_##name##_ALIGN, \
87 .nelems = count, \
102 unsigned size = 0;
103: unsigned align = descs[0].align;
104
src\broadcom\vulkan\v3dv_pipeline.c:
3075 *size = comp_size * length,
3076: *align = comp_size * (length == 3 ? 4 : length);
3077 }
src\compiler\glsl_types.c:
3364 *size = glsl_get_cl_size(t);
3365: *align = glsl_get_cl_alignment(t);
3366 }
3682 size_align(type->fields.array, &elem_size, &elem_align);
3683: *align = elem_align;
3684 *size = type->length * ALIGN_POT(elem_size, elem_align);
3689 *size = 0;
3690: *align = 0;
3691 for (unsigned i = 0; i < type->length; i++) {
3693 size_align(type->fields.structure[i].type, &elem_size, &elem_align);
3694: *align = MAX2(*align, elem_align);
3695 *size = ALIGN_POT(*size, elem_align) + elem_size;
3709 *size = 4 * glsl_get_components(type);
3710: *align = 4;
3711 break;
3725 *size = N * glsl_get_components(type);
3726: *align = N;
3727 break;
3742 *size = 8;
3743: *align = 8;
3744 break;
3768 *size = 4 * glsl_get_components(type);
3769: *align = 16;
3770 break;
3784 *size = 16 * (type->matrix_columns - 1) + N * type->vector_elements;
3785: *align = 16;
3786 break;
src\compiler\glsl\ast_type.cpp:
461 if (q.flags.q.explicit_align)
462: this->align = q.align;
463
src\compiler\glsl\gl_nir_linker.c:
862 *size = comp_size * length,
863: *align = comp_size * (length == 3 ? 4 : length);
864 }
src\compiler\glsl\gl_nir_lower_images.c:
49 *size = s;
50: *align = s;
51 }
src\compiler\glsl\glsl_parser.yy:
1744 $$.flags.q.explicit_align = 1;
1745: $$.align = $3;
1746 }
src\compiler\isaspec\isaspec_decode_impl.c:
575 /* alignment handling */
576: const char *align = strstr(field_name, ":align=");
577
864 /* alignment handling */
865: const char *align = strstr(field_name, ":align=");
866
src\compiler\nir\nir_lower_mem_access_bit_sizes.c:
303 requested = (nir_mem_access_size_align){
304: .align = 4,
305 .bit_size = 32,
src\compiler\nir\nir.h:
2039 * For any offset X which satisfies the complex alignment described by
2040: * align_mul/offset, X % align == 0.
2041 */
2054 * provides a single simple alignment parameter. The offset X is guaranteed
2055: * to satisfy X % align == 0.
2056 */
src\compiler\nir\tests\load_store_vectorizer_tests.cpp:
343 /* Calculate a simple alignment, like how nir_intrinsic_align() does. */
344: uint32_t align = align_mul;
345 if (align_offset)
346: align = 1 << (ffs(align_offset) - 1);
347
361 *size = comp_size * length,
362: *align = comp_size;
363 }
src\compiler\spirv\vtn_variables.c:
424 base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;
425: const uint32_t align = base->mode == vtn_variable_mode_ssbo ?
426 b->options->min_ssbo_alignment : b->options->min_ubo_alignment;
src\freedreno\ir3\ir3_nir_opt_preamble.c:
43 *size = DIV_ROUND_UP(bit_size, 32) * def->num_components;
44: *align = 1;
45 }
src\freedreno\rnn\rnn.c:
193 } else if (!strcmp(attr->name, "align")) {
194: ti->align = getnumattrib(db, file, node->line, attr);
195 ti->alignvalid = 1;
984 dst->max = src->max;
985: dst->align = src->align;
986 dst->addvariant = src->addvariant;
src\freedreno\vulkan\tu_pass.cc:
630
631: uint32_t align = MAX2(1, att->cpp >> block_align_shift);
632 uint32_t nblocks =
src\freedreno\vulkan\tu_shader.cc:
940 *size = comp_size * length;
941: *align = comp_size;
942 }
src\gallium\auxiliary\nir\nir_to_tgsi.c:
3275 */
3276: unsigned worst_start_component = align == 4 ? 3 : align / 4;
3277 if (worst_start_component + num_components > 4)
src\gallium\drivers\iris\iris_resource.c:
1155 */
1156: uint32_t align = MAX2(4 * 4, 8 * 16);
1157 while (align > size)
src\gallium\drivers\panfrost\pan_afbc_cso.c:
270 .bpp = util_format_get_blocksizebits(rsrc->base.format),
271: .align = align,
272 .tiled = tiled,
src\gallium\drivers\r300\r300_texture_desc.c:
72 h_tile = table[macrotile][util_logbase2(pixsize)][microtile][DIM_HEIGHT];
73: align = 64 / (pixsize * h_tile);
74 if (tile < align)
src\gallium\drivers\r300\compiler\nir_to_rc.c:
2193 */
2194: unsigned worst_start_component = align == 4 ? 3 : align / 4;
2195 if (worst_start_component + num_components > 4)
src\gallium\drivers\r300\compiler\r300_nir.c:
52 */
53: unsigned worst_start_component = align == 4 ? 3 : align / 4;
54 if (worst_start_component + num_components > 4)
src\gallium\drivers\r600\sfn\sfn_instr_export.cpp:
289
290: auto align = int_from_string_with_prefix(align_str, "AL:");
291 auto align_offset = int_from_string_with_prefix(align_offset_str, "ALO:");
src\gallium\drivers\r600\sfn\sfn_nir.cpp:
98 if (instr->intrinsic == nir_intrinsic_store_scratch) {
99: align = instr->src[0].ssa->num_components;
100 address_index = 1;
101 } else {
102: align = instr->def.num_components;
103 }
483 if (type->base_type != GLSL_TYPE_ARRAY) {
484: *align = 1;
485 *size = 1;
488 glsl_get_natural_size_align_bytes(type->fields.array, &elem_size, &elem_align);
489: *align = 1;
490 *size = type->length;
src\gallium\drivers\r600\sfn\sfn_shader.cpp:
1156
1157: int align = nir_intrinsic_align_mul(intr);
1158 int align_offset = nir_intrinsic_align_offset(intr);
1205 } else {
1206: int align = nir_intrinsic_align_mul(intr);
1207 int align_offset = nir_intrinsic_align_offset(intr);
src\gallium\drivers\radeonsi\si_shader.c:
809 sym->size = shader->gs_info.esgs_ring_size * 4;
810: sym->align = 64 * 1024;
811 }
816 sym->size = shader->ngg.ngg_emit_size * 4;
817: sym->align = 4;
818 }
src\gallium\drivers\zink\zink_compiler.c:
5303 {
5304: align = nir_combined_align(align, align_offset);
5305
5310 .bit_size = bit_size,
5311: .align = bit_size / 8,
5312 };
5321 bit_size = *(const uint8_t *)cb_data;
5322: align = nir_combined_align(align, align_offset);
5323
5328 .bit_size = bit_size,
5329: .align = bit_size / 8,
5330 };
src\gallium\drivers\zink\zink_resource.c:
2027 {
2028: VkDeviceSize align = *offset % alignment;
2029 if (alignment - 1 > *offset)
2032 *offset -= align, *size += align;
2033: align = alignment - (*size % alignment);
2034 if (*offset + *size + align > obj_size)
src\gallium\frontends\clover\nir\invocation.cpp:
78 *size = 0;
79: *align = 1;
80 } else {
81 *size = type->cl_size();
82: *align = type->cl_alignment();
83 }
src\gallium\frontends\lavapipe\lvp_pipeline.c:
131 *size = comp_size * length,
132: *align = comp_size;
133 }
src\imagination\vulkan\pvr_bo.c:
576 */
577: align = MAX2(align, cache_line_size);
578 assert(util_is_power_of_two_nonzero(align));
src\imgui\imgui_internal.h:
1442 IMGUI_API void RenderTextWrapped(ImVec2 pos, const char* text, const char* text_end, float wrap_width);
1443: IMGUI_API void RenderTextClipped(const ImVec2& pos_min, const ImVec2& pos_max, const char* text, const char* text_end, const ImVec2* text_size_if_known, const ImVec2& align = ImVec2(0,0), const ImRect* clip_rect = NULL);
1444: IMGUI_API void RenderTextClippedEx(ImDrawList* draw_list, const ImVec2& pos_min, const ImVec2& pos_max, const char* text, const char* text_end, const ImVec2* text_size_if_known, const ImVec2& align = ImVec2(0, 0), const ImRect* clip_rect = NULL);
1445 IMGUI_API void RenderFrame(ImVec2 p_min, ImVec2 p_max, ImU32 fill_col, bool border = true, float rounding = 0.0f);
src\imgui\imstb_rectpack.h:
246 // quantize once we've hit OOM, then we could get rid of this parameter.
247: context->align = 1;
248 else {
253 // align >= width / num_nodes
254: // align = ceil(width/num_nodes)
255
256: context->align = (context->width + context->num_nodes-1) / context->num_nodes;
257 }
357 width -= width % c->align;
358: STBRP_ASSERT(width % c->align == 0);
359
src\intel\compiler\brw_eu_compact.c:
2538 devinfo->platform == INTEL_PLATFORM_G4X) {
2539: brw_compact_inst *align = store + offset;
2540 memset(align, 0, sizeof(*align));
2656 if (p->next_insn_offset & sizeof(brw_compact_inst)) {
2657: brw_compact_inst *align = store + offset;
2658 memset(align, 0, sizeof(*align));
src\intel\compiler\brw_fs_bank_conflicts.cpp:
308 {
309: const unsigned align = MAX2(sizeof(void *), __alignof__(vector_type));
310 const unsigned size = DIV_ROUND_UP(n, vector_width) * sizeof(vector_type);
src\intel\compiler\brw_mesh.cpp:
93 *size = comp_size * length,
94: *align = comp_size * (length == 3 ? 4 : length);
95 }
src\intel\compiler\brw_nir.c:
1338 if (align_offset)
1339: align = 1 << (ffs(align_offset) - 1);
1340 else
1341: align = align_mul;
1342
1390 {
1391: const uint32_t align = nir_combined_align(align_mul, align_offset);
1392
1406 .num_components = comps32,
1407: .align = 4,
1408 };
1416 .num_components = 1,
1417: .align = 4,
1418 };
1451 .num_components = 1,
1452: .align = 1,
1453 };
1459 is_load ? DIV_ROUND_UP(bytes, 4) : bytes / 4,
1460: .align = 4,
1461 };
src\intel\isl\isl.c:
307 dev->ss.size = RENDER_SURFACE_STATE_length(info) * 4;
308: dev->ss.align = isl_align(dev->ss.size, 32);
309
src\intel\tools\aubinator_viewer.cpp:
1026 };
1027: float align = 0.0f;
1028 for (uint32_t i = 0; i < ARRAY_SIZE(texts); i += 2)
1029: align = MAX2(align, ImGui::CalcTextSize(texts[i]).x);
1030 align += ImGui::GetStyle().WindowPadding.x + 10;
src\intel\vulkan\anv_allocator.c:
1388
1389: uint32_t align = device->physical->info.mem_alignment;
1390
1392 if (bo->size >= 64 * 1024)
1393: align = MAX2(64 * 1024, align);
1394
1398 if (device->info->has_aux_map && (alloc_flags & ANV_BO_ALLOC_DEDICATED))
1399: align = MAX2(intel_aux_map_get_alignment(device->aux_map_ctx), align);
1400
1407 if (device->info->ver >= 11 && bo->size >= 1 * 1024 * 1024)
1408: align = MAX2(2 * 1024 * 1024, align);
1409
src\intel\vulkan\anv_pipeline.c:
903 *size = comp_size * length,
904: *align = comp_size * (length == 3 ? 4 : length);
905 }
src\intel\vulkan\grl\grl_metakernel_gen.py:
583 def __init__(self, m, name, fields, align):
584: assert align == 0
585 self.name = name
src\intel\vulkan_hasvk\anv_allocator.c:
1560
1561: uint32_t align = 4096;
1562
src\intel\vulkan_hasvk\anv_pipeline.c:
524 *size = comp_size * length,
525: *align = comp_size * (length == 3 ? 4 : length);
526 }
src\mesa\main\texcompress_cpal.c:
172 saved_align = ctx->Unpack.Alignment;
173: align = saved_align;
174
188 _mesa_PixelStorei(GL_UNPACK_ALIGNMENT, 1);
189: align = 1;
190 }
src\microsoft\compiler\dxil_dump.c:
557 unsigned align_mask = (1 << 6 ) - 1;
558: unsigned align = alloca->align & align_mask;
559 _mesa_string_buffer_printf(d->buf, ", %d", 1 << (align - 1));
src\microsoft\compiler\dxil_module.c:
2259 gvar->as = as;
2260: gvar->align = align;
2261 gvar->constant = !!value;
3397 instr->alloca.size = size;
3398: instr->alloca.align = util_logbase2(align) + 1;
3399 assert(instr->alloca.align < (1 << 5));
3470 instr->load.type = type;
3471: instr->load.align = util_logbase2(align) + 1;
3472 instr->load.is_volatile = is_volatile;
3491 instr->store.ptr = ptr;
3492: instr->store.align = util_logbase2(align) + 1;
3493 instr->store.is_volatile = is_volatile;
src\microsoft\compiler\dxil_nir.c:
38 *size = glsl_get_cl_size(type);
39: *align = glsl_get_cl_alignment(type);
40 }
src\microsoft\compiler\nir_to_dxil.c:
6178 return (nir_mem_access_size_align) {
6179: .align = closest_bit_size / 8,
6180 .bit_size = closest_bit_size,
6185 assert(intrin == nir_intrinsic_load_ssbo || intrin == nir_intrinsic_store_ssbo);
6186: uint32_t align = nir_combined_align(align_mul, align_offset);
6187 if (align < min_bit_size / 8) {
6192 return (nir_mem_access_size_align) {
6193: .align = min_bit_size / 8,
6194 .bit_size = min_bit_size,
6208 return (nir_mem_access_size_align) {
6209: .align = bit_size / 8,
6210 .bit_size = bit_size,
src\microsoft\spirv_to_dxil\dxil_spirv_nir_lower_bindless.c:
40 *size = 1;
41: *align = *size;
42 }
src\microsoft\spirv_to_dxil\dxil_spirv_nir.c:
129 *size = comp_size * length;
130: *align = comp_size;
131 }
144 *size = 0;
145: *align = 0;
146 for (uint32_t i = 0; i < glsl_get_length(type); ++i) {
148 *size = ALIGN_POT(*size, base_align) + base_size;
149: *align = MAX2(*align, base_align);
150 }
154
155: *align = MAX2(base_align, 4);
156 *size = ALIGN_POT(base_size, *align);
src\nouveau\codegen\nv50_ir_from_nir.cpp:
63 *size = comp_size * length;
64: *align = 0x10;
65 }
src\nouveau\vulkan\nvk_shader.c:
41 unsigned length = glsl_get_vector_elements(type);
42: *size = comp_size * length, *align = comp_size;
43 }
src\nouveau\winsys\nouveau_bo.c:
187 req.info.size = size;
188: req.align = alignment;
189
194 bo->size = size;
195: bo->align = alignment;
196 bo->offset = -1ULL;
src\panfrost\compiler\bifrost_compile.c:
4310 {
4311: align = nir_combined_align(align, align_offset);
4312 assert(util_is_power_of_two_nonzero(align));
4322 */
4323: if (align == 1)
4324 bit_size = 8;
4325: else if (align == 2)
4326 bit_size = MIN2(bit_size, 16);
4330 .bit_size = bit_size,
4331: .align = bit_size / 8,
4332 };
src\panfrost\lib\pan_pool.h:
96 { \
97: .size = pan_size(name), .align = pan_alignment(name), .nelems = count, \
98 }
112 unsigned size = 0;
113: unsigned align = descs[0].align;
114
src\panfrost\lib\genxml\gen_pack.py:
259 if ret[0] == 'align':
260: align = ret[1]
261 # Make sure the alignment is a power of 2
275 self.size = 0
276: self.align = int(attrs["align"]) if "align" in attrs else None
277
688 self.group.length = int(attrs["size"]) * 4
689: self.group.align = int(attrs["align"]) if "align" in attrs else None
690 self.structs[attrs["name"]] = self.group
src\panfrost\vulkan\panvk_device.c:
1174
1175: const uint64_t align = 4096;
1176 const uint64_t size = panvk_image_get_total_size(image);
src\panfrost\vulkan\panvk_vX_shader.c:
207 unsigned length = glsl_get_vector_elements(type);
208: *size = comp_size * length, *align = comp_size * (length == 3 ? 4 : length);
209 }
src\util\tests\gc_alloc_tests.cpp:
36 for (size_t size = 4; size <= 256; size += 4) {
37: for (size_t align = 4; align <= HEADER_ALIGN; align *= 2) {
38 gc_ctx *ctx = gc_context(NULL);
src\util\tests\vma\vma_random_test.cpp:
104 uint64_t align_pages = 1ULL << align_order;
105: uint64_t align = align_pages * MEM_PAGE_SIZE;
106
124 } else {
125: assert(addr % align == 0);
126 uint64_t addr_page = addr / MEM_PAGE_SIZE;
src\vulkan\util\vk_alloc.h:
187 #define VK_MULTIALLOC(_name) \
188: struct vk_multialloc _name = { .align = 1 }
189
201 ma->size = offset + size;
202: ma->align = MAX2(ma->align, align);
203
So my suggestion is to rename the `align` function to `align_pot` to prevent future shadowing issues; the name `align_pot` is not currently used anywhere in the tree.