diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index fcd7d2eb90abe7b9d125d40abefca4babdbcf8b1..5f42e44b2ab356407365db02bef3bd1b3144c8e1 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -32,6 +32,24 @@
  * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
  * objects that are physically contiguous in memory. This is useful for
  * display drivers that are unable to map scattered buffers via an IOMMU.
+ *
+ * For GEM callback helpers used in &struct drm_gem_object_funcs, see the
+ * likewise named functions with an _object_ infix; for example,
+ * drm_gem_cma_object_vmap() wraps drm_gem_cma_vmap(). These helpers perform
+ * the necessary type conversion.
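+ *
+ * As an illustrative sketch (my_gem_funcs is a made-up name, not taken from
+ * any driver), these wrappers can be wired into a driver's
+ * &struct drm_gem_object_funcs as follows::
+ *
+ *     static const struct drm_gem_object_funcs my_gem_funcs = {
+ *             .free = drm_gem_cma_object_free,
+ *             .print_info = drm_gem_cma_object_print_info,
+ *             .get_sg_table = drm_gem_cma_object_get_sg_table,
+ *             .vmap = drm_gem_cma_object_vmap,
+ *             .mmap = drm_gem_cma_object_mmap,
+ *             .vm_ops = &drm_gem_cma_vm_ops,
+ *     };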
  */
 
 static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
@@ -192,16 +196,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
 }
 
 /**
- * drm_gem_cma_free_object - free resources associated with a CMA GEM object
- * @gem_obj: GEM object to free
+ * drm_gem_cma_free - free resources associated with a CMA GEM object
+ * @cma_obj: CMA GEM object to free
  *
  * This function frees the backing memory of the CMA GEM object, cleans up the
  * GEM object state and frees the memory used to store the object itself.
  * If the buffer is imported and the virtual address is set, it is released.
  */
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
 {
-	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
+	struct drm_gem_object *gem_obj = &cma_obj->base;
 	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
 
 	if (gem_obj->import_attach) {
@@ -217,7 +221,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 
 	kfree(cma_obj);
 }
-EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+EXPORT_SYMBOL_GPL(drm_gem_cma_free);
 
 /**
  * drm_gem_cma_dumb_create_internal - create a dumb buffer object
@@ -364,18 +368,15 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
 
 /**
  * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
+ * @cma_obj: CMA GEM object
  * @p: DRM printer
  * @indent: Tab indentation level
- * @obj: GEM object
  *
- * This function can be used as the &drm_driver->gem_print_info callback.
- * It prints paddr and vaddr for use in e.g. debugfs output.
+ * This function prints paddr and vaddr for use in e.g. debugfs output.
  */
-void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
-			    const struct drm_gem_object *obj)
+void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
+			    struct drm_printer *p, unsigned int indent)
 {
-	const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
 	drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
 	drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
 }
@@ -384,7 +385,7 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
 /**
  * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
  *     pages for a CMA GEM object
- * @obj: GEM object
+ * @cma_obj: CMA GEM object
  *
  * This function exports a scatter/gather table by calling the standard
  * DMA mapping API.
@@ -392,9 +393,9 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
  * Returns:
  * A pointer to the scatter/gather table of pinned pages or NULL on failure.
  */
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj)
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
 {
-	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+	struct drm_gem_object *obj = &cma_obj->base;
 	struct sg_table *sgt;
 	int ret;
 
@@ -460,22 +461,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
 /**
  * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
  *     address space
- * @obj: GEM object
+ * @cma_obj: CMA GEM object
  * @map: Returns the kernel virtual address of the CMA GEM object's backing
  *       store.
  *
- * This function maps a buffer into the kernel's
- * virtual address space. Since the CMA buffers are already mapped into the
- * kernel virtual address space this simply returns the cached virtual
- * address.
+ * This function maps a buffer into the kernel's virtual address space.
+ * Since the CMA buffers are already mapped into the kernel virtual address
+ * space, this simply returns the cached virtual address.
  *
  * Returns:
  * 0 on success, or a negative error code otherwise.
  */
-int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map)
 {
-	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
 	dma_buf_map_set_vaddr(map, cma_obj->vaddr);
 
 	return 0;
@@ -484,7 +482,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
 
 /**
  * drm_gem_cma_mmap - memory-map an exported CMA GEM object
- * @obj: GEM object
+ * @cma_obj: CMA GEM object
  * @vma: VMA for the area to be mapped
  *
  * This function maps a buffer into a userspace process's address space.
@@ -494,9 +492,9 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
  * Returns:
  * 0 on success or a negative error code on failure.
  */
-int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
 {
-	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *obj = &cma_obj->base;
 	int ret;
 
 	/*
@@ -507,8 +505,6 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
 	vma->vm_flags &= ~VM_PFNMAP;
 
-	cma_obj = to_drm_gem_cma_obj(obj);
-
 	if (cma_obj->map_noncoherent) {
 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 830756b3159e00c9fa5b123666af08f97f03c4cd..6d1281a343e9aca123f9581e310be9fe4315c1e7 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -177,7 +177,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
 		bo->validated_shader = NULL;
 	}
 
-	drm_gem_cma_free_object(obj);
+	drm_gem_cma_free(&bo->base);
 }
 
 static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
@@ -720,7 +720,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
 		return -EINVAL;
 	}
 
-	return drm_gem_cma_mmap(obj, vma);
+	return drm_gem_cma_mmap(&bo->base, vma);
 }
 
 static const struct vm_operations_struct vc4_vm_ops = {
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 56d2f9fdf9aceabff4908447c4efd23356ff6d1e..adb507a9dbf099c56c0a8d356c4d5b96d4a251f1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -32,28 +32,41 @@
 #define to_drm_gem_cma_obj(gem_obj) \
 	container_of(gem_obj, struct drm_gem_cma_object, base)
 
-/* free GEM object */
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-
-/* allocate physical memory */
 struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 					      size_t size);
+void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj);
+void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
+			    struct drm_printer *p, unsigned int indent);
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj);
+int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map);
+int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma);
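+
+/*
+ * Illustrative sketch (my_bo and my_bo_free are hypothetical names, not taken
+ * from any driver): drivers that embed struct drm_gem_cma_object in their own
+ * buffer type pass the embedded object to these interfaces directly, e.g.:
+ *
+ *     struct my_bo {
+ *             struct drm_gem_cma_object base;
+ *     };
+ *
+ *     static void my_bo_free(struct drm_gem_object *obj)
+ *     {
+ *             struct my_bo *bo = container_of(to_drm_gem_cma_obj(obj),
+ *                                             struct my_bo, base);
+ *
+ *             drm_gem_cma_free(&bo->base);
+ *     }
+ */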
 
 extern const struct vm_operations_struct drm_gem_cma_vm_ops;
 
-void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
-			    const struct drm_gem_object *obj);
-
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj);
-int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-
 /*
  * GEM object functions
  */
 
 /**
- * drm_gem_cma_object_free - GEM object function for drm_gem_cma_free_object()
+ * drm_gem_cma_object_free - GEM object function for drm_gem_cma_free()
  * @obj: GEM object to free
  *
- * This function wraps drm_gem_cma_free_object(). Drivers that employ the CMA helpers
+ * This function wraps drm_gem_cma_free(). Drivers that employ the CMA helpers
@@ -61,7 +56,9 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
  */
 static inline void drm_gem_cma_object_free(struct drm_gem_object *obj)
 {
-	drm_gem_cma_free_object(obj);
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	drm_gem_cma_free(cma_obj);
 }
 
 /**
@@ -76,7 +73,9 @@ static inline void drm_gem_cma_object_free(struct drm_gem_object *obj)
 static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned int indent,
 						 const struct drm_gem_object *obj)
 {
-	drm_gem_cma_print_info(p, indent, obj);
+	const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	drm_gem_cma_print_info(cma_obj, p, indent);
 }
 
 /**
@@ -91,7 +90,9 @@ static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned
  */
 static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_object *obj)
 {
-	return drm_gem_cma_get_sg_table(obj);
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	return drm_gem_cma_get_sg_table(cma_obj);
 }
 
 /**
@@ -107,7 +108,9 @@ static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_ob
  */
 static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
-	return drm_gem_cma_vmap(obj, map);
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	return drm_gem_cma_vmap(cma_obj, map);
 }
 
 /**
@@ -123,7 +126,9 @@ static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma
  */
 static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
-	return drm_gem_cma_mmap(obj, vma);
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	return drm_gem_cma_mmap(cma_obj, vma);
 }
 
 /*