Trivial.
---
src/amd/vulkan/radv_cmd_buffer.c | 4 ++--
src/amd/vulkan/radv_descriptor_set.c | 2 +-
src/amd/vulkan/radv_device.c | 6 +++---
src/amd/vulkan/radv_entrypoints_gen.py | 2 +-
src/amd/vulkan/radv_image.c | 4 ++--
src/amd/vulkan/radv_nir_to_llvm.c | 4 ++--
6 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index baa28d4..1ca6874 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -236,11 +236,11 @@ static VkResult radv_create_cmd_buffer(
if (pool) {
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
cmd_buffer->queue_family_index = pool->queue_family_index;
} else {
- /* Init the pool_link so we can safefly call list_del when we destroy
+ /* Init the pool_link so we can safely call list_del when we destroy
* the command buffer
*/
list_inithead(&cmd_buffer->pool_link);
cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
}
@@ -1154,11 +1154,11 @@ radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
radeon_emit(cmd_buffer->cs, 0);
}
/*
- *with DCC some colors don't require CMASK elimiation before being
+ * With DCC some colors don't require CMASK elimination before being
* used as a texture. This sets a predicate value to determine if the
* cmask eliminate is required.
*/
void
radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
diff --git a/src/amd/vulkan/radv_descriptor_set.c b/src/amd/vulkan/radv_descriptor_set.c
index 4b08a1f..9d783b8 100644
--- a/src/amd/vulkan/radv_descriptor_set.c
+++ b/src/amd/vulkan/radv_descriptor_set.c
@@ -320,11 +320,11 @@ void radv_GetDescriptorSetLayoutSupport(VkDevice device,
pSupport->supported = supported;
}
/*
* Pipeline layouts. These have nothing to do with the pipeline. They are
- * just muttiple descriptor set layouts pasted together
+ * just multiple descriptor set layouts pasted together.
*/
VkResult radv_CreatePipelineLayout(
VkDevice _device,
const VkPipelineLayoutCreateInfo* pCreateInfo,
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 9fe415c..08795dd 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -276,11 +276,11 @@ radv_physical_device_init(struct radv_physical_device *device,
/* These flags affect shader compilation. */
uint64_t shader_env_flags =
(device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0);
- /* The gpu id is already embeded in the uuid so we just pass "radv"
+ /* The gpu id is already embedded in the uuid so we just pass "radv"
* when creating the cache.
*/
char buf[VK_UUID_SIZE * 2 + 1];
disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
@@ -298,11 +298,11 @@ radv_physical_device_init(struct radv_physical_device *device,
device->rbplus_allowed = device->rad_info.family == CHIP_STONEY ||
                         device->rad_info.family == CHIP_VEGA12 ||
                         device->rad_info.family == CHIP_RAVEN;
}
- /* The mere presense of CLEAR_STATE in the IB causes random GPU hangs
+ /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
* on SI.
*/
device->has_clear_state = device->rad_info.chip_class >= CIK;
device->cpdma_prefetch_writes_memory = device->rad_info.chip_class <= VI;
@@ -1464,11 +1464,11 @@ VkResult radv_CreateDevice(
/* The maximum number of scratch waves. Scratch space isn't divided
* evenly between CUs. The number is only a function of the number of CUs.
* We can decrease the constant to decrease the scratch buffer size.
*
- * sctx->scratch_waves must be >= the maximum posible size of
+ * sctx->scratch_waves must be >= the maximum possible size of
* 1 threadgroup, so that the hw doesn't hang from being unable
* to start any.
*
* The recommended value is 4 per CU at most. Higher numbers don't
* bring much benefit, but they still occupy chip resources (think
diff --git a/src/amd/vulkan/radv_entrypoints_gen.py b/src/amd/vulkan/radv_entrypoints_gen.py
index 892491e..a201142 100644
--- a/src/amd/vulkan/radv_entrypoints_gen.py
+++ b/src/amd/vulkan/radv_entrypoints_gen.py
@@ -114,11 +114,11 @@ struct string_map_entry {
uint32_t name;
uint32_t hash;
uint32_t num;
};
-/* We use a big string constant to avoid lots of reloctions from the entry
+/* We use a big string constant to avoid lots of relocations from the entry
* point table to lots of little strings. The entries in the entry point table
* store the index into this big string.
*/
static const char strings[] =
diff --git a/src/amd/vulkan/radv_image.c b/src/amd/vulkan/radv_image.c
index bfe497c..f735fb9 100644
--- a/src/amd/vulkan/radv_image.c
+++ b/src/amd/vulkan/radv_image.c
@@ -412,11 +412,11 @@ static unsigned radv_tex_dim(VkImageType image_type, VkImageViewType view_type,
if (view_type == VK_IMAGE_VIEW_TYPE_3D)
return V_008F1C_SQ_RSRC_IMG_3D;
else
return V_008F1C_SQ_RSRC_IMG_2D_ARRAY;
default:
- unreachable("illegale image type");
+ unreachable("illegal image type");
}
}
static unsigned gfx9_border_color_swizzle(const enum vk_swizzle swizzle[4])
{
@@ -532,11 +532,11 @@ si_make_texture_descriptor(struct radv_device *device,
state[7] = 0;
if (device->physical_device->rad_info.chip_class >= GFX9) {
unsigned bc_swizzle = gfx9_border_color_swizzle(swizzle);
- /* Depth is the the last accessible layer on Gfx9.
+ /* Depth is the last accessible layer on Gfx9.
* The hw doesn't need to know the total number of layers.
*/
if (type == V_008F1C_SQ_RSRC_IMG_3D)
state[4] |= S_008F20_DEPTH(depth - 1);
else
diff --git a/src/amd/vulkan/radv_nir_to_llvm.c b/src/amd/vulkan/radv_nir_to_llvm.c
index a6b48e2..e2d241e 100644
--- a/src/amd/vulkan/radv_nir_to_llvm.c
+++ b/src/amd/vulkan/radv_nir_to_llvm.c
@@ -2520,11 +2520,11 @@ ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
LLVMBasicBlockRef new_block;
/* get current basic block */
current_block = LLVMGetInsertBlock(ctx->ac.builder);
- /* chqeck if there's another block after this one */
+ /* check if there's another block after this one */
next_block = LLVMGetNextBasicBlock(current_block);
if (next_block) {
/* insert the new block before the next block */
new_block = LLVMInsertBasicBlockInContext(ctx->context,
next_block, name);
}
@@ -2645,11 +2645,11 @@ write_tess_factors(struct radv_shader_context *ctx)
for (i = 0; i < 4; i++) {
inner[i] = LLVMGetUndef(ctx->ac.i32);
outer[i] = LLVMGetUndef(ctx->ac.i32);
}
- // LINES reverseal
+ // LINES reversal
if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
ctx->ac.i32_1, "");
outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);