Commit fd8d71ce authored by Juan A. Suárez
Browse files

v3dv: rename VC5 to V3D



Since we no longer use references to the old VC5 name, rename the
remaining definitions from VC5 to V3D in the Vulkan driver.
Reviewed-by: Alejandro Piñeiro <apinheiro@igalia.com>
Signed-off-by: Juan A. Suárez Romero <jasuarez@igalia.com>
Part-of: <!10402>
parent 26618dfb
Pipeline #310608 waiting for manual action with stages
......@@ -23,7 +23,7 @@
/** @file v3d_tiling.c
*
* Handles information about the VC5 tiling formats, and loading and storing
* Handles information about the V3D tiling formats, and loading and storing
* from them.
*/
......@@ -153,7 +153,7 @@ v3d_get_ublinear_1_column_pixel_offset(uint32_t cpp, uint32_t image_h,
/**
* Returns the byte offset for a given pixel in a UIF layout.
*
* UIF is the general VC5 tiling layout shared across 3D, media, and scanout.
* UIF is the general V3D tiling layout shared across 3D, media, and scanout.
* It stores pixels in UIF blocks (2x2 utiles), and UIF blocks are stored in
* 4x4 groups, and those 4x4 groups are then stored in raster order.
*/
......@@ -416,35 +416,35 @@ v3d_move_tiled_image(void *gpu, uint32_t gpu_stride,
bool is_load)
{
switch (tiling_format) {
case VC5_TILING_UIF_XOR:
case V3D_TILING_UIF_XOR:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
v3d_get_uif_xor_pixel_offset,
is_load);
break;
case VC5_TILING_UIF_NO_XOR:
case V3D_TILING_UIF_NO_XOR:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
v3d_get_uif_no_xor_pixel_offset,
is_load);
break;
case VC5_TILING_UBLINEAR_2_COLUMN:
case V3D_TILING_UBLINEAR_2_COLUMN:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
v3d_get_ublinear_2_column_pixel_offset,
is_load);
break;
case VC5_TILING_UBLINEAR_1_COLUMN:
case V3D_TILING_UBLINEAR_1_COLUMN:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
v3d_get_ublinear_1_column_pixel_offset,
is_load);
break;
case VC5_TILING_LINEARTILE:
case V3D_TILING_LINEARTILE:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
......
......@@ -194,7 +194,7 @@ void v3dv_cl_ensure_space_with_branch(struct v3dv_cl *cl, uint32_t space);
* Helper function called by the XML-generated pack functions for filling in
* an address field in shader records.
*
* Since we have a private address space as of VC5, our BOs can have lifelong
* Since we have a private address space as of V3D, our BOs can have lifelong
* offsets, and all the kernel needs to know is which BOs need to be paged in
* for this exec.
*/
......
......@@ -611,8 +611,8 @@ v3dv_job_start_frame(struct v3dv_job *job,
*/
cl_emit(&job->bcl, START_TILE_BINNING, bin);
job->ez_state = VC5_EZ_UNDECIDED;
job->first_ez_state = VC5_EZ_UNDECIDED;
job->ez_state = V3D_EZ_UNDECIDED;
job->first_ez_state = V3D_EZ_UNDECIDED;
}
static void
......@@ -1492,11 +1492,11 @@ cmd_buffer_render_pass_emit_load(struct v3dv_cmd_buffer *cmd_buffer,
load.r_b_swap = iview->swap_rb;
load.memory_format = slice->tiling;
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
load.height_in_ub_or_stride =
slice->padded_height_of_output_image_in_uif_blocks;
} else if (slice->tiling == VC5_TILING_RASTER) {
} else if (slice->tiling == V3D_TILING_RASTER) {
load.height_in_ub_or_stride = slice->stride;
}
......@@ -1726,11 +1726,11 @@ cmd_buffer_render_pass_emit_store(struct v3dv_cmd_buffer *cmd_buffer,
store.r_b_swap = iview->swap_rb;
store.memory_format = slice->tiling;
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
store.height_in_ub_or_stride =
slice->padded_height_of_output_image_in_uif_blocks;
} else if (slice->tiling == VC5_TILING_RASTER) {
} else if (slice->tiling == V3D_TILING_RASTER) {
store.height_in_ub_or_stride = slice->stride;
}
......@@ -2087,16 +2087,16 @@ set_rcl_early_z_config(struct v3dv_job *job,
}
switch (job->first_ez_state) {
case VC5_EZ_UNDECIDED:
case VC5_EZ_LT_LE:
case V3D_EZ_UNDECIDED:
case V3D_EZ_LT_LE:
*early_z_disable = false;
*early_z_test_and_update_direction = EARLY_Z_DIRECTION_LT_LE;
break;
case VC5_EZ_GT_GE:
case V3D_EZ_GT_GE:
*early_z_disable = false;
*early_z_test_and_update_direction = EARLY_Z_DIRECTION_GT_GE;
break;
case VC5_EZ_DISABLED:
case V3D_EZ_DISABLED:
*early_z_disable = true;
break;
}
......@@ -2230,8 +2230,8 @@ cmd_buffer_emit_render_pass_rcl(struct v3dv_cmd_buffer *cmd_buffer)
&state->attachments[attachment_idx].clear_value.color[0];
uint32_t clear_pad = 0;
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
int uif_block_height = v3d_utile_height(image->cpp) * 2;
uint32_t implicit_padded_height =
......@@ -3015,15 +3015,15 @@ job_update_ez_state(struct v3dv_job *job,
struct v3dv_pipeline *pipeline,
struct v3dv_cmd_buffer *cmd_buffer)
{
/* If first_ez_state is VC5_EZ_DISABLED it means that we have already
/* If first_ez_state is V3D_EZ_DISABLED it means that we have already
* determined that we should disable EZ completely for all draw calls in
* this job. This will cause us to disable EZ for the entire job in the
* Tile Rendering Mode RCL packet and when we do that we need to make sure
* we never emit a draw call in the job with EZ enabled in the CFG_BITS
* packet, so ez_state must also be VC5_EZ_DISABLED;
* packet, so ez_state must also be V3D_EZ_DISABLED;
*/
if (job->first_ez_state == VC5_EZ_DISABLED) {
assert(job->ez_state == VC5_EZ_DISABLED);
if (job->first_ez_state == V3D_EZ_DISABLED) {
assert(job->ez_state == V3D_EZ_DISABLED);
return;
}
......@@ -3044,8 +3044,8 @@ job_update_ez_state(struct v3dv_job *job,
assert(state->subpass_idx < state->pass->subpass_count);
struct v3dv_subpass *subpass = &state->pass->subpasses[state->subpass_idx];
if (subpass->ds_attachment.attachment == VK_ATTACHMENT_UNUSED) {
job->first_ez_state = VC5_EZ_DISABLED;
job->ez_state = VC5_EZ_DISABLED;
job->first_ez_state = V3D_EZ_DISABLED;
job->ez_state = V3D_EZ_DISABLED;
return;
}
......@@ -3073,16 +3073,16 @@ job_update_ez_state(struct v3dv_job *job,
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
perf_debug("Loading depth aspect in a secondary command buffer "
"without framebuffer info disables early-z tests.\n");
job->first_ez_state = VC5_EZ_DISABLED;
job->ez_state = VC5_EZ_DISABLED;
job->first_ez_state = V3D_EZ_DISABLED;
job->ez_state = V3D_EZ_DISABLED;
return;
}
if (((fb->width % 2) != 0 || (fb->height % 2) != 0)) {
perf_debug("Loading depth aspect for framebuffer with odd width "
"or height disables early-Z tests.\n");
job->first_ez_state = VC5_EZ_DISABLED;
job->ez_state = VC5_EZ_DISABLED;
job->first_ez_state = V3D_EZ_DISABLED;
job->ez_state = V3D_EZ_DISABLED;
return;
}
}
......@@ -3096,39 +3096,39 @@ job_update_ez_state(struct v3dv_job *job,
struct v3dv_shader_variant *fs_variant =
pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
if (fs_variant->prog_data.fs->writes_z) {
job->ez_state = VC5_EZ_DISABLED;
job->ez_state = V3D_EZ_DISABLED;
return;
}
switch (pipeline->ez_state) {
case VC5_EZ_UNDECIDED:
case V3D_EZ_UNDECIDED:
/* If the pipeline didn't pick a direction but didn't disable, then go
* along with the current EZ state. This allows EZ optimization for Z
* func == EQUAL or NEVER.
*/
break;
case VC5_EZ_LT_LE:
case VC5_EZ_GT_GE:
case V3D_EZ_LT_LE:
case V3D_EZ_GT_GE:
/* If the pipeline picked a direction, then it needs to match the current
* direction if we've decided on one.
*/
if (job->ez_state == VC5_EZ_UNDECIDED)
if (job->ez_state == V3D_EZ_UNDECIDED)
job->ez_state = pipeline->ez_state;
else if (job->ez_state != pipeline->ez_state)
job->ez_state = VC5_EZ_DISABLED;
job->ez_state = V3D_EZ_DISABLED;
break;
case VC5_EZ_DISABLED:
case V3D_EZ_DISABLED:
/* If the pipeline disables EZ because of a bad Z func or stencil
* operation, then we can't do any more EZ in this frame.
*/
job->ez_state = VC5_EZ_DISABLED;
job->ez_state = V3D_EZ_DISABLED;
break;
}
if (job->first_ez_state == VC5_EZ_UNDECIDED &&
job->ez_state != VC5_EZ_DISABLED) {
if (job->first_ez_state == V3D_EZ_UNDECIDED &&
job->ez_state != V3D_EZ_DISABLED) {
job->first_ez_state = job->ez_state;
}
}
......@@ -3720,7 +3720,7 @@ emit_configuration_bits(struct v3dv_cmd_buffer *cmd_buffer)
v3dv_return_if_oom(cmd_buffer, NULL);
cl_emit_with_prepacked(&job->bcl, CFG_BITS, pipeline->cfg_bits, config) {
config.early_z_enable = job->ez_state != VC5_EZ_DISABLED;
config.early_z_enable = job->ez_state != V3D_EZ_DISABLED;
config.early_z_updates_enable = config.early_z_enable &&
pipeline->z_updates_enable;
}
......
......@@ -1160,7 +1160,7 @@ v3dv_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
2.0 * max_fb_size - 1 },
.viewportSubPixelBits = 0,
.minMemoryMapAlignment = page_size,
.minTexelBufferOffsetAlignment = VC5_UIFBLOCK_SIZE,
.minTexelBufferOffsetAlignment = V3D_UIFBLOCK_SIZE,
.minUniformBufferOffsetAlignment = 32,
.minStorageBufferOffsetAlignment = 32,
.minTexelOffset = -8,
......
......@@ -135,21 +135,21 @@ v3d_setup_slices(struct v3dv_image *image)
level_height = DIV_ROUND_UP(level_height, block_height);
if (!image->tiled) {
slice->tiling = VC5_TILING_RASTER;
slice->tiling = V3D_TILING_RASTER;
if (image->type == VK_IMAGE_TYPE_1D)
level_width = align(level_width, 64 / image->cpp);
} else {
if ((i != 0 || !uif_top) &&
(level_width <= utile_w || level_height <= utile_h)) {
slice->tiling = VC5_TILING_LINEARTILE;
slice->tiling = V3D_TILING_LINEARTILE;
level_width = align(level_width, utile_w);
level_height = align(level_height, utile_h);
} else if ((i != 0 || !uif_top) && level_width <= uif_block_w) {
slice->tiling = VC5_TILING_UBLINEAR_1_COLUMN;
slice->tiling = V3D_TILING_UBLINEAR_1_COLUMN;
level_width = align(level_width, uif_block_w);
level_height = align(level_height, uif_block_h);
} else if ((i != 0 || !uif_top) && level_width <= 2 * uif_block_w) {
slice->tiling = VC5_TILING_UBLINEAR_2_COLUMN;
slice->tiling = V3D_TILING_UBLINEAR_2_COLUMN;
level_width = align(level_width, 2 * uif_block_w);
level_height = align(level_height, uif_block_h);
} else {
......@@ -167,10 +167,10 @@ v3d_setup_slices(struct v3dv_image *image)
* perfectly misaligned.
*/
if ((level_height / uif_block_h) %
(VC5_PAGE_CACHE_SIZE / VC5_UIFBLOCK_ROW_SIZE) == 0) {
slice->tiling = VC5_TILING_UIF_XOR;
(V3D_PAGE_CACHE_SIZE / V3D_UIFBLOCK_ROW_SIZE) == 0) {
slice->tiling = V3D_TILING_UIF_XOR;
} else {
slice->tiling = VC5_TILING_UIF_NO_XOR;
slice->tiling = V3D_TILING_UIF_NO_XOR;
}
}
}
......@@ -178,8 +178,8 @@ v3d_setup_slices(struct v3dv_image *image)
slice->offset = offset;
slice->stride = level_width * image->cpp;
slice->padded_height = level_height;
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
slice->padded_height_of_output_image_in_uif_blocks =
slice->padded_height / (2 * v3d_utile_height(image->cpp));
}
......@@ -195,7 +195,7 @@ v3d_setup_slices(struct v3dv_image *image)
if (i == 1 &&
level_width > 4 * uif_block_w &&
level_height > PAGE_CACHE_MINUS_1_5_UB_ROWS * uif_block_h) {
slice_total_size = align(slice_total_size, VC5_UIFCFG_PAGE_SIZE);
slice_total_size = align(slice_total_size, V3D_UIFCFG_PAGE_SIZE);
}
offset += slice_total_size;
......@@ -465,10 +465,10 @@ pack_texture_shader_state_helper(struct v3dv_device *device,
v3dv_pack(image_view->texture_shader_state[index], TEXTURE_SHADER_STATE, tex) {
tex.level_0_is_strictly_uif =
(image->slices[0].tiling == VC5_TILING_UIF_XOR ||
image->slices[0].tiling == VC5_TILING_UIF_NO_XOR);
(image->slices[0].tiling == V3D_TILING_UIF_XOR ||
image->slices[0].tiling == V3D_TILING_UIF_NO_XOR);
tex.level_0_xor_enable = (image->slices[0].tiling == VC5_TILING_UIF_XOR);
tex.level_0_xor_enable = (image->slices[0].tiling == V3D_TILING_UIF_XOR);
if (tex.level_0_is_strictly_uif)
tex.level_0_ub_pad = image->slices[0].ub_pad;
......
......@@ -56,17 +56,17 @@
/* These are tunable parameters in the HW design, but all the V3D
* implementations agree.
*/
#define VC5_UIFCFG_BANKS 8
#define VC5_UIFCFG_PAGE_SIZE 4096
#define VC5_UIFCFG_XOR_VALUE (1 << 4)
#define VC5_PAGE_CACHE_SIZE (VC5_UIFCFG_PAGE_SIZE * VC5_UIFCFG_BANKS)
#define VC5_UBLOCK_SIZE 64
#define VC5_UIFBLOCK_SIZE (4 * VC5_UBLOCK_SIZE)
#define VC5_UIFBLOCK_ROW_SIZE (4 * VC5_UIFBLOCK_SIZE)
#define V3D_UIFCFG_BANKS 8
#define V3D_UIFCFG_PAGE_SIZE 4096
#define V3D_UIFCFG_XOR_VALUE (1 << 4)
#define V3D_PAGE_CACHE_SIZE (V3D_UIFCFG_PAGE_SIZE * V3D_UIFCFG_BANKS)
#define V3D_UBLOCK_SIZE 64
#define V3D_UIFBLOCK_SIZE (4 * V3D_UBLOCK_SIZE)
#define V3D_UIFBLOCK_ROW_SIZE (4 * V3D_UIFBLOCK_SIZE)
#define PAGE_UB_ROWS (VC5_UIFCFG_PAGE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_UB_ROWS (V3D_UIFCFG_PAGE_SIZE / V3D_UIFBLOCK_ROW_SIZE)
#define PAGE_UB_ROWS_TIMES_1_5 ((PAGE_UB_ROWS * 3) >> 1)
#define PAGE_CACHE_UB_ROWS (VC5_PAGE_CACHE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_CACHE_UB_ROWS (V3D_PAGE_CACHE_SIZE / V3D_UIFBLOCK_ROW_SIZE)
#define PAGE_CACHE_MINUS_1_5_UB_ROWS (PAGE_CACHE_UB_ROWS - PAGE_UB_ROWS_TIMES_1_5)
......
......@@ -1171,11 +1171,11 @@ emit_tlb_clear_store(struct v3dv_cmd_buffer *cmd_buffer,
store.r_b_swap = iview->swap_rb;
store.memory_format = slice->tiling;
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
store.height_in_ub_or_stride =
slice->padded_height_of_output_image_in_uif_blocks;
} else if (slice->tiling == VC5_TILING_RASTER) {
} else if (slice->tiling == V3D_TILING_RASTER) {
store.height_in_ub_or_stride = slice->stride;
}
......@@ -1427,8 +1427,8 @@ emit_tlb_clear_job(struct v3dv_cmd_buffer *cmd_buffer,
const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
uint32_t clear_pad = 0;
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
int uif_block_height = v3d_utile_height(image->cpp) * 2;
uint32_t implicit_padded_height =
......
......@@ -438,8 +438,8 @@ emit_rcl_prologue(struct v3dv_job *job,
const struct v3dv_image *image = clear_info->image;
const struct v3d_resource_slice *slice =
&image->slices[clear_info->level];
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
int uif_block_height = v3d_utile_height(image->cpp) * 2;
uint32_t implicit_padded_height =
......@@ -585,7 +585,7 @@ emit_linear_load(struct v3dv_cl *cl,
load.buffer_to_load = buffer;
load.address = v3dv_cl_address(bo, offset);
load.input_image_format = format;
load.memory_format = VC5_TILING_RASTER;
load.memory_format = V3D_TILING_RASTER;
load.height_in_ub_or_stride = stride;
load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
}
......@@ -605,7 +605,7 @@ emit_linear_store(struct v3dv_cl *cl,
store.address = v3dv_cl_address(bo, offset);
store.clear_buffer_being_stored = false;
store.output_image_format = format;
store.memory_format = VC5_TILING_RASTER;
store.memory_format = V3D_TILING_RASTER;
store.height_in_ub_or_stride = stride;
store.decimate_mode = msaa ? V3D_DECIMATE_MODE_ALL_SAMPLES :
V3D_DECIMATE_MODE_SAMPLE_0;
......@@ -679,11 +679,11 @@ emit_image_load(struct v3dv_cl *cl,
load.r_b_swap = needs_rb_swap;
load.channel_reverse = needs_chan_reverse;
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
load.height_in_ub_or_stride =
slice->padded_height_of_output_image_in_uif_blocks;
} else if (slice->tiling == VC5_TILING_RASTER) {
} else if (slice->tiling == V3D_TILING_RASTER) {
load.height_in_ub_or_stride = slice->stride;
}
......@@ -739,11 +739,11 @@ emit_image_store(struct v3dv_cl *cl,
is_copy_to_buffer,
is_copy_from_buffer);
store.memory_format = slice->tiling;
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
store.height_in_ub_or_stride =
slice->padded_height_of_output_image_in_uif_blocks;
} else if (slice->tiling == VC5_TILING_RASTER) {
} else if (slice->tiling == V3D_TILING_RASTER) {
store.height_in_ub_or_stride = slice->stride;
}
......@@ -1483,11 +1483,11 @@ emit_tfu_job(struct v3dv_cmd_buffer *cmd_buffer,
tfu.iia |= src_offset;
uint32_t icfg;
if (src_slice->tiling == VC5_TILING_RASTER) {
if (src_slice->tiling == V3D_TILING_RASTER) {
icfg = V3D_TFU_ICFG_FORMAT_RASTER;
} else {
icfg = V3D_TFU_ICFG_FORMAT_LINEARTILE +
(src_slice->tiling - VC5_TILING_LINEARTILE);
(src_slice->tiling - V3D_TILING_LINEARTILE);
}
tfu.icfg |= icfg << V3D_TFU_ICFG_FORMAT_SHIFT;
......@@ -1496,16 +1496,16 @@ emit_tfu_job(struct v3dv_cmd_buffer *cmd_buffer,
tfu.ioa |= dst_offset;
tfu.ioa |= (V3D_TFU_IOA_FORMAT_LINEARTILE +
(dst_slice->tiling - VC5_TILING_LINEARTILE)) <<
(dst_slice->tiling - V3D_TILING_LINEARTILE)) <<
V3D_TFU_IOA_FORMAT_SHIFT;
tfu.icfg |= format->tex_type << V3D_TFU_ICFG_TTYPE_SHIFT;
switch (src_slice->tiling) {
case VC5_TILING_UIF_NO_XOR:
case VC5_TILING_UIF_XOR:
case V3D_TILING_UIF_NO_XOR:
case V3D_TILING_UIF_XOR:
tfu.iis |= src_slice->padded_height / (2 * v3d_utile_height(src->cpp));
break;
case VC5_TILING_RASTER:
case V3D_TILING_RASTER:
tfu.iis |= src_slice->stride / src->cpp;
break;
default:
......@@ -1516,8 +1516,8 @@ emit_tfu_job(struct v3dv_cmd_buffer *cmd_buffer,
* OPAD field for the destination (how many extra UIF blocks beyond
* those necessary to cover the height).
*/
if (dst_slice->tiling == VC5_TILING_UIF_NO_XOR ||
dst_slice->tiling == VC5_TILING_UIF_XOR) {
if (dst_slice->tiling == V3D_TILING_UIF_NO_XOR ||
dst_slice->tiling == V3D_TILING_UIF_XOR) {
uint32_t uif_block_h = 2 * v3d_utile_height(dst->cpp);
uint32_t implicit_padded_height = align(height, uif_block_h);
uint32_t icfg =
......@@ -2671,7 +2671,7 @@ copy_buffer_to_image_tfu(struct v3dv_cmd_buffer *cmd_buffer,
tfu.ioa |= dst_offset;
tfu.ioa |= (V3D_TFU_IOA_FORMAT_LINEARTILE +
(slice->tiling - VC5_TILING_LINEARTILE)) <<
(slice->tiling - V3D_TILING_LINEARTILE)) <<
V3D_TFU_IOA_FORMAT_SHIFT;
tfu.icfg |= format->tex_type << V3D_TFU_ICFG_TTYPE_SHIFT;
......@@ -2679,8 +2679,8 @@ copy_buffer_to_image_tfu(struct v3dv_cmd_buffer *cmd_buffer,
* OPAD field for the destination (how many extra UIF blocks beyond
* those necessary to cover the height).
*/
if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
slice->tiling == VC5_TILING_UIF_XOR) {
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
slice->tiling == V3D_TILING_UIF_XOR) {
uint32_t uif_block_h = 2 * v3d_utile_height(image->cpp);
uint32_t implicit_padded_height = align(height, uif_block_h);
uint32_t icfg =
......
......@@ -2625,25 +2625,25 @@ pipeline_set_ez_state(struct v3dv_pipeline *pipeline,
const VkPipelineDepthStencilStateCreateInfo *ds_info)
{
if (!ds_info || !ds_info->depthTestEnable) {
pipeline->ez_state = VC5_EZ_DISABLED;
pipeline->ez_state = V3D_EZ_DISABLED;
return;
}
switch (ds_info->depthCompareOp) {
case VK_COMPARE_OP_LESS:
case VK_COMPARE_OP_LESS_OR_EQUAL:
pipeline->ez_state = VC5_EZ_LT_LE;
pipeline->ez_state = V3D_EZ_LT_LE;
break;
case VK_COMPARE_OP_GREATER:
case VK_COMPARE_OP_GREATER_OR_EQUAL:
pipeline->ez_state = VC5_EZ_GT_GE;
pipeline->ez_state = V3D_EZ_GT_GE;
break;
case VK_COMPARE_OP_NEVER:
case VK_COMPARE_OP_EQUAL:
pipeline->ez_state = VC5_EZ_UNDECIDED;
pipeline->ez_state = V3D_EZ_UNDECIDED;
break;
default:
pipeline->ez_state = VC5_EZ_DISABLED;
pipeline->ez_state = V3D_EZ_DISABLED;
break;
}
......@@ -2651,7 +2651,7 @@ pipeline_set_ez_state(struct v3dv_pipeline *pipeline,
if (ds_info->stencilTestEnable &&
(!stencil_op_is_no_op(&ds_info->front) ||
!stencil_op_is_no_op(&ds_info->back))) {
pipeline->ez_state = VC5_EZ_DISABLED;
pipeline->ez_state = V3D_EZ_DISABLED;
}
}
......
......@@ -457,26 +457,26 @@ struct v3dv_format {
*/
enum v3d_tiling_mode {
/* Untiled resources. Not valid as texture inputs. */
VC5_TILING_RASTER,
V3D_TILING_RASTER,
/* Single line of u-tiles. */
VC5_TILING_LINEARTILE,
V3D_TILING_LINEARTILE,
/* Departure from standard 4-UIF block column format. */
VC5_TILING_UBLINEAR_1_COLUMN,
V3D_TILING_UBLINEAR_1_COLUMN,
/* Departure from standard 4-UIF block column format. */
VC5_TILING_UBLINEAR_2_COLUMN,
V3D_TILING_UBLINEAR_2_COLUMN,
/* Normal tiling format: grouped in 4x4 UIFblocks, each of which is
* split 2x2 into utiles.
*/
VC5_TILING_UIF_NO_XOR,
V3D_TILING_UIF_NO_XOR,
/* Normal tiling format: grouped in 4x4 UIFblocks, each of which is
* split 2x2 into utiles.
*/
VC5_TILING_UIF_XOR,
V3D_TILING_UIF_XOR,
};
struct v3d_resource_slice {
......@@ -818,10 +818,10 @@ void v3dv_viewport_compute_xform(const VkViewport *viewport,
float translate[3]);
enum v3dv_ez_state {
VC5_EZ_UNDECIDED = 0,
VC5_EZ_GT_GE,
VC5_EZ_LT_LE,
VC5_EZ_DISABLED,
V3D_EZ_UNDECIDED = 0,
V3D_EZ_GT_GE,
V3D_EZ_LT_LE,
V3D_EZ_DISABLED,
};
enum v3dv_job_type {
......
Markdown is supported
0% or .
You are about to add 0 people to the d