diff --git a/include/linux/slab.h b/include/linux/slab.h
index 8bf14d9762ecb9ca9e2a89761da0c8a2ce3cf6ed..231abc8976c52c4f075ac79b9dc83a2ff4611a9e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -135,6 +135,8 @@ struct mem_cgroup;
 void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
+extern bool usercopy_fallback;
+
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 			size_t align, slab_flags_t flags,
 			void (*ctor)(void *));
diff --git a/mm/slab.c b/mm/slab.c
index 1c02f6e942357f1c89edad77916612b5bcac569c..b9b0df620bb93309c81625adbfc4228dd3d4ef6f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4426,7 +4426,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	 * to be a temporary method to find any missing usercopy
 	 * whitelists.
 	 */
-	if (offset <= cachep->object_size &&
+	if (usercopy_fallback &&
+	    offset <= cachep->object_size &&
 	    n <= cachep->object_size - offset) {
 		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
 		return;
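
For context, a condensed sketch of how the patched check order reads inside
mm/slab.c:__check_heap_object(): whitelist first, then the (now conditional)
object-size fallback, then rejection. The lookup helpers (obj_to_index(),
index_to_obj(), obj_offset()) and the usercopy_warn()/usercopy_abort()
reporters come from the same hardened-usercopy series; sanity checks are
elided, so treat this as a sketch rather than the exact function body.

void __check_heap_object(const void *ptr, unsigned long n,
			 struct page *page, bool to_user)
{
	struct kmem_cache *cachep = page->slab_cache;
	unsigned int objnr = obj_to_index(cachep, page, (void *)ptr);
	unsigned long offset;

	/* Find the copy's starting offset within the object. */
	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

	/* 1) A range falling entirely within the usercopy whitelist passes. */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;

	/* 2) With the fallback enabled, in-object copies only WARN. */
	if (usercopy_fallback &&
	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
	}

	/* 3) Everything else is rejected. */
	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
}
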
diff --git a/mm/slab_common.c b/mm/slab_common.c
index fc3e66bdce7514fb72a11b44fe26de89b95a1b62..a51f654086372e451bb1d0345ad71c24eec9b919 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -31,6 +31,14 @@ LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
+#ifdef CONFIG_HARDENED_USERCOPY
+bool usercopy_fallback __ro_after_init =
+		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
+module_param(usercopy_fallback, bool, 0400);
+MODULE_PARM_DESC(usercopy_fallback,
+		"WARN instead of reject usercopy whitelist violations");
+#endif
+
 static LIST_HEAD(slab_caches_to_rcu_destroy);
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
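
Since slab_common.c is built in, module_param() here produces the boot
parameter "slab_common.usercopy_fallback" referenced in the Kconfig help
below, plus a root-readable node under /sys/module/slab_common/parameters/
(mode 0400); __ro_after_init then freezes the value once init completes, so
it cannot be toggled at runtime. The intended permanent fix for each warning
is to declare a whitelist when creating the cache. A hypothetical example
using this series' kmem_cache_create_usercopy() (the struct, field, and
cache names are illustrative, not part of this patch):

/* Only 'payload' may ever be copied to/from userspace. */
struct example_obj {
	spinlock_t lock;	/* kernel-internal, never exposed */
	char payload[128];	/* whitelisted usercopy region */
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create_usercopy("example_obj",
			sizeof(struct example_obj), 0, SLAB_PANIC,
			offsetof(struct example_obj, payload),
			FIELD_SIZEOF(struct example_obj, payload),
			NULL);
	return 0;
}
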
diff --git a/mm/slub.c b/mm/slub.c
index 6d9b1e7d3226bac24e48573e776864468814bf5d..862d835b30423d4e8a93bd49e948033823a01814 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3859,7 +3859,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	 * whitelists.
 	 */
 	object_size = slab_ksize(s);
-	if (offset <= object_size && n <= object_size - offset) {
+	if (usercopy_fallback &&
+	    offset <= object_size && n <= object_size - offset) {
 		usercopy_warn("SLUB object", s->name, to_user, offset, n);
 		return;
 	}
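
SLUB runs the same three-stage check but validates against slab_ksize(s),
the usable size of the allocation, rather than cachep->object_size. A
hypothetical copy that this fallback is meant to surface, reusing the
illustrative example_obj cache sketched above: the source lies inside the
allocation but outside the declared whitelist, so it WARNs when
usercopy_fallback is enabled and is rejected when it is not. (Hardened
usercopy only checks copies whose length is not a compile-time constant,
hence the runtime 'len' parameter here.)

static long example_read(void __user *ubuf, size_t len)
{
	struct example_obj *obj = kmem_cache_alloc(example_cachep, GFP_KERNEL);
	long ret = 0;

	if (!obj)
		return -ENOMEM;

	/*
	 * The copy starts at 'lock', inside the allocation but outside
	 * the whitelist declared for example_cachep: usercopy_warn()
	 * fires with the fallback enabled, usercopy_abort() without it.
	 */
	if (copy_to_user(ubuf, &obj->lock, len))
		ret = -EFAULT;

	kmem_cache_free(example_cachep, obj);
	return ret;
}
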
diff --git a/security/Kconfig b/security/Kconfig
index e8e449444e658be4a9190c6ea2de14cca8fc4890..ae457b018da50b52317adcd6f4eab43fad78a1a6 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -152,6 +152,20 @@ config HARDENED_USERCOPY
 	  or are part of the kernel text. This kills entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_FALLBACK
+	bool "Allow usercopy whitelist violations to fallback to object size"
+	depends on HARDENED_USERCOPY
+	default y
+	help
+	  This is a temporary option that allows missing usercopy whitelists
+	  to be discovered via a WARN() to the kernel log instead of
+	  rejecting the copy: the copy falls back to the non-whitelisted
+	  hardened usercopy check, which validates against the slab
+	  allocation size instead of the whitelist size. This option will
+	  be removed once all missing usercopy whitelists appear to have
+	  been identified and fixed. Booting with
+	  "slab_common.usercopy_fallback=Y/N" can change this setting.
+
 config HARDENED_USERCOPY_PAGESPAN
 	bool "Refuse to copy allocations that span multiple pages"
 	depends on HARDENED_USERCOPY
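
With "default y", existing configurations keep the warn-and-continue
behavior; builds that want strict whitelist enforcement from day one can
opt out at build time, and either choice can still be overridden at boot
with "slab_common.usercopy_fallback=Y/N". A hypothetical .config fragment
for the strict variant:

CONFIG_HARDENED_USERCOPY=y
# CONFIG_HARDENED_USERCOPY_FALLBACK is not set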