Commit d34f3a1d authored by Maíra Canal, committed by Marge Bot
Browse files

v3dv: fix multiple typos



Signed-off-by: Maíra Canal <mcanal@igalia.com>
Reviewed-by: Alejandro Piñeiro <apinheiro@igalia.com>
Part-of: <!19538>
parent 67ffe25f
Pipeline #743344 waiting for manual action with stages
......@@ -493,7 +493,7 @@ v3dv_job_start_frame(struct v3dv_job *job,
static bool
job_should_enable_double_buffer(struct v3dv_job *job)
{
/* Inocmpatibility with double-buffer */
/* Incompatibility with double-buffer */
if (!job->can_use_double_buffer)
return false;
......@@ -1686,7 +1686,7 @@ v3dv_cmd_buffer_subpass_start(struct v3dv_cmd_buffer *cmd_buffer,
*
* Secondary command buffers don't start subpasses (and may not even have
* framebuffer state), so we only care about this in primaries. The only
* exception could be a secondary runnning inside a subpass that needs to
* exception could be a secondary running inside a subpass that needs to
* record a meta operation (with its own render pass) that relies on
* attachment load clears, but we don't have any instances of that right
* now.
......@@ -2580,7 +2580,7 @@ cmd_buffer_pre_draw_split_job(struct v3dv_cmd_buffer *cmd_buffer)
* in rasterization."
*
* We need to enable MSAA in the TILE_BINNING_MODE_CFG packet, which we
* emit when we start a new frame at the begining of a subpass. At that point,
* emit when we start a new frame at the beginning of a subpass. At that point,
* if the framebuffer doesn't have any attachments we won't enable MSAA and
* the job won't be valid in the scenario described by the spec.
*
......@@ -3351,7 +3351,7 @@ v3dv_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
float minDepthBounds,
float maxDepthBounds)
{
/* We do not support depth bounds testing so we just ingore this. We are
/* We do not support depth bounds testing so we just ignore this. We are
* already asserting that pipelines don't enable the feature anyway.
*/
}
......@@ -3622,7 +3622,7 @@ v3dv_cmd_buffer_schedule_end_query(struct v3dv_cmd_buffer *cmd_buffer,
*
* In our case, only the first query is used but this means we still need
* to flag the other queries as available so we don't emit errors when
* the applications attempt to retrive values from them.
* the applications attempt to retrieve values from them.
*/
struct v3dv_render_pass *pass = cmd_buffer->state.pass;
if (!pass->multiview_enabled) {
......
......@@ -187,7 +187,7 @@ v3d_setup_slices(struct v3dv_image *image)
/* The HW aligns level 1's base to a page if any of level 1 or
* below could be UIF XOR. The lower levels then inherit the
* alignment for as long as necesary, thanks to being power of
* alignment for as long as necessary, thanks to being power of
* two aligned.
*/
if (i == 1 &&
......
......@@ -1318,7 +1318,7 @@ copy_buffer_to_image_tfu(struct v3dv_cmd_buffer *cmd_buffer,
* at a time, and the TFU copies full images. Also, V3D depth bits for
* both D24S8 and D24X8 stored in the 24-bit MSB of each 32-bit word, but
* the Vulkan spec has the buffer data specified the other way around, so it
* is not a straight copy, we would havew to swizzle the channels, which the
* is not a straight copy, we would have to swizzle the channels, which the
* TFU can't do.
*/
if (image->vk.format == VK_FORMAT_D24_UNORM_S8_UINT ||
......
......@@ -3322,7 +3322,7 @@ pipeline_get_qpu(struct v3dv_pipeline *pipeline,
}
/* FIXME: we use the same macro in various drivers, maybe move it to
* the comon vk_util.h?
* the common vk_util.h?
*/
#define WRITE_STR(field, ...) ({ \
memset(field, 0, sizeof(field)); \
......
......@@ -243,7 +243,7 @@ v3dv_pipeline_shared_data_write_to_blob(const struct v3dv_pipeline_shared_data *
struct blob *blob);
/**
* It searchs for pipeline cached data, and returns a v3dv_pipeline_shared_data with
* It searches for pipeline cached data, and returns a v3dv_pipeline_shared_data with
 * it, or NULL if it doesn't have it cached. In the former case it will increase
 * the ref_count, so the caller is responsible for unreffing it.
*/
......
......@@ -177,10 +177,10 @@ struct v3dv_physical_device {
* Specifically, when self-importing (i.e. importing a BO into the same
* device that created it), the kernel will give us the same BO handle
* for both BOs and we must only free it once when both references are
* freed. Otherwise, if we are not self-importing, we get two differnt BO
* freed. Otherwise, if we are not self-importing, we get two different BO
* handles, and we want to free each one individually.
*
* The BOs in this map all have a refcnt with the referece counter and
* The BOs in this map all have a refcnt with the reference counter and
* only self-imported BOs will ever have a refcnt > 1.
*/
struct util_sparse_array bo_map;
......@@ -558,7 +558,7 @@ struct v3dv_device {
struct v3dv_pipeline_cache default_pipeline_cache;
/* GL_SHADER_STATE_RECORD needs to speficy default attribute values. The
/* GL_SHADER_STATE_RECORD needs to specify default attribute values. The
* following covers the most common case, that is all attributes format
 * being float, allowing us to reuse the same BO for all
* pipelines matching this requirement. Pipelines that need integer
......@@ -1568,7 +1568,7 @@ struct v3dv_cmd_buffer {
/* Used at submit time to link command buffers in the submission that have
* spawned wait threads, so we can then wait on all of them to complete
* before we process any signal sempahores or fences.
* before we process any signal semaphores or fences.
*/
struct list_head list_link;
......@@ -1827,7 +1827,7 @@ struct v3dv_descriptor_set_binding_layout {
/* Number of array elements in this binding */
uint32_t array_size;
/* Index into the flattend descriptor set */
/* Index into the flattened descriptor set */
uint32_t descriptor_index;
uint32_t dynamic_offset_count;
......@@ -2472,7 +2472,7 @@ u64_compare(const void *key1, const void *key2)
return memcmp(key1, key2, sizeof(uint64_t)) == 0;
}
/* Helper to call hw ver speficic functions */
/* Helper to call hw ver specific functions */
#define v3dv_X(device, thing) ({ \
__typeof(&v3d42_##thing) v3d_X_thing; \
switch (device->devinfo.ver) { \
......
......@@ -387,7 +387,7 @@ check_needs_clear(const struct v3dv_cmd_buffer_state *state,
if (state->job->is_subpass_continue)
return false;
/* If the render area is not aligned to tile boudaries we can't use the
/* If the render area is not aligned to tile boundaries we can't use the
* TLB for a clear.
*/
if (!state->tile_aligned_render_area)
......@@ -787,7 +787,7 @@ v3dX(cmd_buffer_emit_render_pass_rcl)(struct v3dv_cmd_buffer *cmd_buffer)
const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
struct v3dv_cl *rcl = &job->rcl;
/* Comon config must be the first TILE_RENDERING_MODE_CFG and
/* Common config must be the first TILE_RENDERING_MODE_CFG and
* Z_STENCIL_CLEAR_VALUES must be last. The ones in between are optional
* updates to the previous HW state.
*/
......
......@@ -51,7 +51,7 @@ v3dX(descriptor_bo_size)(VkDescriptorType type)
}
/* To compute the max_bo_size we want to iterate through the descriptor
* types. Unfourtunately we can't just use the descriptor type enum values, as
* types. Unfortunately we can't just use the descriptor type enum values, as
* the values are not defined consecutively (so extensions could add new
* descriptor types), and VK_DESCRIPTOR_TYPE_MAX_ENUM is also a really big
* number.
......
......@@ -178,7 +178,7 @@ pack_cfg_bits(struct v3dv_pipeline *pipeline,
rs_info->polygonMode == VK_POLYGON_MODE_POINT;
}
/* diamond-exit rasterization does not suport oversample */
/* diamond-exit rasterization does not support oversample */
config.rasterizer_oversample_mode =
(config.line_rasterization == V3D_LINE_RASTERIZATION_PERP_END_CAPS &&
pipeline->msaa) ? 1 : 0;
......@@ -435,7 +435,7 @@ pack_shader_state_record(struct v3dv_pipeline *pipeline)
shader.vertex_shader_propagate_nans = true;
shader.fragment_shader_propagate_nans = true;
/* Note: see previous note about adresses */
/* Note: see previous note about addresses */
/* shader.coordinate_shader_code_address */
/* shader.vertex_shader_code_address */
/* shader.fragment_shader_code_address */
......@@ -460,7 +460,7 @@ pack_shader_state_record(struct v3dv_pipeline *pipeline)
shader.vertex_shader_output_vpm_segment_size =
prog_data_vs->vpm_output_size;
/* Note: see previous note about adresses */
/* Note: see previous note about addresses */
/* shader.coordinate_shader_uniforms_address */
/* shader.vertex_shader_uniforms_address */
/* shader.fragment_shader_uniforms_address */
......@@ -502,7 +502,7 @@ pack_shader_state_record(struct v3dv_pipeline *pipeline)
shader.instance_id_read_by_vertex_shader =
prog_data_vs->uses_iid;
/* Note: see previous note about adresses */
/* Note: see previous note about addresses */
/* shader.address_of_default_attribute_values */
}
}
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment