diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
index a7cd2d203ceda64ebd711be273b97249921f1edf..747e489980f9f247a4ef90b07a71ab89ac937762 100644
--- a/arch/x86/kernel/cpu/mce/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -284,10 +284,10 @@ static void __maybe_unused throttle_active_work(struct work_struct *work)
 	avg /= ARRAY_SIZE(state->temp_samples);

 	if (state->average > avg) {
-		pr_warn("CPU%d: %s temperature is above threshold, cpu clock is throttled (total events = %lu)\n",
-			this_cpu,
-			state->level == CORE_LEVEL ? "Core" : "Package",
-			state->count);
+		pr_notice("CPU%d: %s temperature is above threshold, cpu clock is throttled (total events = %lu)\n",
+			  this_cpu,
+			  state->level == CORE_LEVEL ? "Core" : "Package",
+			  state->count);
 		state->rate_control_active = true;
 	}

diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f546a5761c4f2c419ddb1d24fd0e8972ea633324..5381b6ebef5e0ae06817ea1867f1c072cfd190ec 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2048,7 +2048,7 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
 	unsigned int err, i;

 	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
-		ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
+		ata_dev_notice(dev, "ATA Identify Device Log not supported\n");
 		return false;
 	}

@@ -2122,7 +2122,7 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
 	unsigned int err_mask;

 	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
-		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
+		ata_dev_notice(dev, "NCQ Send/Recv Log not supported\n");
 		return;
 	}
 	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
@@ -2151,8 +2151,8 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
 	unsigned int err_mask;

 	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
-		ata_dev_warn(dev,
-			     "NCQ Send/Recv Log not supported\n");
+		ata_dev_notice(dev,
+			       "NCQ Send/Recv Log not supported\n");
 		return;
 	}
 	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
@@ -2484,14 +2484,14 @@ int ata_dev_configure(struct ata_device *dev)
 	if (ata_id_is_cfa(id)) {
 		/* CPRM may make this media unusable */
 		if (id[ATA_ID_CFA_KEY_MGMT] & 1)
-			ata_dev_warn(dev,
+			ata_dev_notice(dev,
 				"supports DRM functions and may not be fully accessible\n");
 		snprintf(revbuf, 7, "CFA");
 	} else {
 		snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

 		/* Warn the user if the device has TPM extensions */
 		if (ata_id_has_tpm(id))
-			ata_dev_warn(dev,
+			ata_dev_notice(dev,
 				"supports DRM functions and may not be fully accessible\n");
 	}

@@ -2707,8 +2707,8 @@ int ata_dev_configure(struct ata_device *dev)
 	}

 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
-		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
-		ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
+		ata_dev_notice(dev, "WARNING: device requires firmware update to be fully functional\n");
+		ata_dev_notice(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
 	}

 	return 0;
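Note on the hunks above: the messages keep their text and only drop from KERN_WARNING (loglevel 4) to KERN_NOTICE (loglevel 5), so log scanners that only flag warnings and errors stay quiet about conditions the test farm considers routine. A minimal sketch of the difference, not part of the patch, assuming a generic struct device *dev:

	/* sketch only: same text, different loglevel */
	dev_warn(dev, "ATA Identify Device Log not supported\n");	/* KERN_WARNING: flagged by warning filters */
	dev_notice(dev, "ATA Identify Device Log not supported\n");	/* KERN_NOTICE: logged, not flagged */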
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 1cb28c20807c59d3b61d78eb0895732dceccf760..206882e154bc6f220a54e919bee6bc0cf512172d 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -19,6 +19,14 @@ config DRM_I915_WERROR
 config DRM_I915_DEBUG
 	bool "Enable additional driver debugging"
 	depends on DRM_I915
+	select PCI_MSI # ... for iommu enabled by default
+	select IOMMU_API
+	select IOMMU_IOVA
+	select IOMMU_SUPPORT
+	select NEED_DMA_MAP_STATE
+	select DMAR_TABLE
+	select INTEL_IOMMU
+	select INTEL_IOMMU_DEFAULT_ON
 	select DEBUG_FS
 	select PREEMPT_COUNT
 	select I2C_CHARDEV
@@ -35,6 +43,7 @@ config DRM_I915_DEBUG
 	select DRM_I915_SELFTEST
 	select DRM_I915_DEBUG_RUNTIME_PM
 	select DRM_I915_DEBUG_MMIO
+	select BROKEN # for prototype uAPI
 	default n
 	help
 	  Choose this option to turn on extra driver debugging that may affect
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 87b17bac04c27c24622b825a4e1431817bc7cb16..3543d76c834048f3642f8c912dcb4e7695897cf7 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -443,8 +443,6 @@ static int __init intel_iommu_setup(char *str)
 			no_platform_optin = 1;
 			pr_info("IOMMU disabled\n");
 		} else if (!strncmp(str, "igfx_off", 8)) {
-			dmar_map_gfx = 0;
-			pr_info("Disable GFX device mapping\n");
 		} else if (!strncmp(str, "forcedac", 8)) {
 			pr_info("Forcing DAC for PCI devices\n");
 			dmar_forcedac = 1;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 30ae4ffda5c1ea89329f015e5c6d7870889264a6..e06435f7feba9de76b36c46eae64d63abe4f2b03 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1071,8 +1071,10 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 	if (maxvec < minvec)
 		return -ERANGE;

-	if (WARN_ON_ONCE(dev->msi_enabled))
+	if (dev->msi_enabled) {
+		pci_info(dev, "can't enable MSI, already enabled\n");
 		return -EINVAL;
+	}

 	nvec = pci_msi_vec_count(dev);
 	if (nvec < 0)
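With the WARN_ON_ONCE() removed above, a driver that tries to enable MSI while it is already enabled now gets a plain -EINVAL plus one pci_info() line instead of a full backtrace in dmesg. A hypothetical caller's view (sketch, not part of the patch):

	/* sketch only: a second enable attempt now fails quietly */
	int ret = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);
	if (ret < 0)
		return ret;	/* -EINVAL, no WARN splat for CI to trip over */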
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 50b2fc7fcc0e32576d4527ee08bb861ad97537b5..f511b8bf23caf334fe1bb12735cf168ba901bc84 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -12,6 +12,7 @@
 #include <linux/acpi.h>
 #include <linux/pci.h>
 #include <linux/usb/hcd.h>
+#include <linux/dmi.h>

 #include "hub.h"

@@ -81,6 +82,20 @@ int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable)
 }
 EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);

+static const struct dmi_system_id intel_icl_broken_acpi[] = {
+	{
+		.ident = "ICL RVP",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client Platform"),
+		},
+	},
+
+	{ }
+};
+
+static bool acpi_connection_type_broken;
+
 static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
 		struct acpi_pld_info *pld)
 {
@@ -89,6 +104,10 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
 	union acpi_object *upc = NULL;
 	acpi_status status;

+	/* Work around unknown ACPI instruction error on ICL RVP BIOSes. */
+	if (acpi_connection_type_broken)
+		return USB_PORT_CONNECT_TYPE_UNKNOWN;
+
 	/*
 	 * According to 9.14 in ACPI Spec 6.2. _PLD indicates whether usb port
 	 * is user visible and _UPC indicates whether it is connectable. If
@@ -273,6 +292,11 @@ static struct acpi_bus_type usb_acpi_bus = {

 int usb_acpi_register(void)
 {
+	if (dmi_check_system(intel_icl_broken_acpi)) {
+		pr_info("USB ACPI connection type broken.\n");
+		acpi_connection_type_broken = true;
+	}
+
 	return register_acpi_bus_type(&usb_acpi_bus);
 }

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6a584b3e5c74f5bedf9c8cf1695e30585335770b..4894c89b16a81261b31eb0868fb72bfbd0a85d2e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -134,8 +134,8 @@ struct held_lock {
 	unsigned int read:2;		/* see lock_acquire() comment */
 	unsigned int check:1;		/* see lock_acquire() comment */
 	unsigned int hardirqs_off:1;
-	unsigned int references:12;	/* 32 bits */
-	unsigned int pin_count;
+	unsigned int pin_count:12;	/* 32 bits */
+	unsigned int references;
 };

 /*
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 077e7ee69e3d8cb22a671817eececec4a5db7637..5522921593431a0e39cadb93a06b17472deb1486 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -145,10 +145,10 @@ enum perf_event_sample_format {
 	PERF_SAMPLE_CGROUP			= 1U << 21,

 	PERF_SAMPLE_MAX = 1U << 22,		/* non-ABI */
-
-	__PERF_SAMPLE_CALLCHAIN_EARLY		= 1ULL << 63, /* non-ABI; internal use */
 };

+#define __PERF_SAMPLE_CALLCHAIN_EARLY	(1ULL << 63) /* non-ABI; internal use */
+
 /*
  * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
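__PERF_SAMPLE_CALLCHAIN_EARLY keeps its value above; it only moves from the uAPI enum to a plain macro. A 1ULL << 63 enumerator does not fit in the int range ISO C guarantees for enum constants, which is presumably what the instrumented debug build objects to. A compile-time restatement of the "non-ABI" promise, written against the patched header (sketch, not part of the patch):

	#include <linux/perf_event.h>	/* the uAPI header patched above */

	/* the internal-only flag must stay clear of every ABI sample bit */
	_Static_assert((__PERF_SAMPLE_CALLCHAIN_EARLY & (PERF_SAMPLE_MAX - 1)) == 0,
		       "__PERF_SAMPLE_CALLCHAIN_EARLY overlaps ABI sample bits");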
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7ed5248f0445eadeaac5711a25523978b47aae7e..030dea8a0fcb120b50773a1f74983f6c944908a9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5122,20 +5122,16 @@ static int __perf_read_group_add(struct perf_event *leader,
 }

 static int perf_read_group(struct perf_event *event,
-			   u64 read_format, char __user *buf)
+			   u64 read_format, char __user *buf,
+			   u64 *values)
 {
 	struct perf_event *leader = event->group_leader, *child;
 	struct perf_event_context *ctx = leader->ctx;
 	int ret;
-	u64 *values;

 	lockdep_assert_held(&ctx->mutex);

-	values = kzalloc(event->read_size, GFP_KERNEL);
-	if (!values)
-		return -ENOMEM;
-
-	values[0] = 1 + leader->nr_siblings;
+	*values = 1 + leader->nr_siblings;

 	/*
 	 * By locking the child_mutex of the leader we effectively
@@ -5153,25 +5149,17 @@ static int perf_read_group(struct perf_event *event,
 			goto unlock;
 	}

-	mutex_unlock(&leader->child_mutex);
-
 	ret = event->read_size;
-	if (copy_to_user(buf, values, event->read_size))
-		ret = -EFAULT;
-	goto out;
-
 unlock:
 	mutex_unlock(&leader->child_mutex);
-out:
-	kfree(values);

 	return ret;
 }

 static int perf_read_one(struct perf_event *event,
-			 u64 read_format, char __user *buf)
+			 u64 read_format, char __user *buf,
+			 u64 *values)
 {
 	u64 enabled, running;
-	u64 values[4];
 	int n = 0;
 	values[n++] = __perf_event_read_value(event, &enabled, &running);
@@ -5182,9 +5170,6 @@ static int perf_read_one(struct perf_event *event,
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(event);

-	if (copy_to_user(buf, values, n * sizeof(u64)))
-		return -EFAULT;
-
 	return n * sizeof(u64);
 }

@@ -5205,7 +5190,8 @@ static bool is_event_hup(struct perf_event *event)
  * Read the performance event - simple non blocking version for now
  */
 static ssize_t
-__perf_read(struct perf_event *event, char __user *buf, size_t count)
+__perf_read(struct perf_event *event, char __user *buf,
+	    size_t count, u64 *values)
 {
 	u64 read_format = event->attr.read_format;
 	int ret;
@@ -5223,9 +5209,9 @@
 	WARN_ON_ONCE(event->ctx->parent_ctx);

 	if (read_format & PERF_FORMAT_GROUP)
-		ret = perf_read_group(event, read_format, buf);
+		ret = perf_read_group(event, read_format, buf, values);
 	else
-		ret = perf_read_one(event, read_format, buf);
+		ret = perf_read_one(event, read_format, buf, values);

 	return ret;
 }
@@ -5235,16 +5221,31 @@
 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
 	struct perf_event *event = file->private_data;
 	struct perf_event_context *ctx;
+	u64 stack_values[8];
+	u64 *values;
 	int ret;

 	ret = security_perf_event_read(event);
 	if (ret)
 		return ret;

+	if (event->read_size <= sizeof(stack_values))
+		values = memset(stack_values, 0, event->read_size);
+	else
+		values = kzalloc(event->read_size, GFP_KERNEL);
+	if (!values)
+		return -ENOMEM;
+
 	ctx = perf_event_ctx_lock(event);
-	ret = __perf_read(event, buf, count);
+	ret = __perf_read(event, buf, count, values);
 	perf_event_ctx_unlock(event, ctx);

+	if (ret > 0 && copy_to_user(buf, values, ret))
+		ret = -EFAULT;
+
+	if (values != stack_values)
+		kfree(values);
+
 	return ret;
 }
@@ -10860,7 +10861,8 @@ void perf_pmu_unregister(struct pmu *pmu)
 		device_del(pmu->dev);
 		put_device(pmu->dev);
 	}
-	free_pmu_context(pmu);
+	if (!find_pmu_context(pmu->task_ctx_nr))
+		free_pmu_context(pmu);
 	mutex_unlock(&pmus_lock);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
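The perf_read() rework above moves the scratch buffer and the copy_to_user() out of the locked region: reads that fit in eight u64s use the on-stack array and never allocate, larger group reads fall back to kzalloc(), and the copy-out happens only after perf_event_ctx_unlock(). A standalone userspace sketch of the same small-buffer-with-heap-fallback pattern, hypothetical names throughout, not part of the patch:

	#include <stdlib.h>
	#include <string.h>

	/* sketch: mirrors the stack_values/kzalloc split in perf_read() above */
	long read_counters(size_t read_size, int (*gather)(unsigned long long *buf))
	{
		unsigned long long stack_buf[8];
		unsigned long long *buf;
		long ret;

		if (read_size <= sizeof(stack_buf))
			buf = memset(stack_buf, 0, read_size);	/* common case: no allocation */
		else
			buf = calloc(1, read_size);		/* rare large-group case */
		if (!buf)
			return -1;

		ret = gather(buf) ? -1 : (long)read_size;	/* fill while "locked"... */
		/* ...the caller-visible copy would happen here, after dropping the lock */

		if (buf != stack_buf)
			free(buf);
		return ret;
	}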
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index ce76f490126cccb310750c6b32f2a7b67bd5eb60..cfb630b1ec9fc31f662d3fd40e9c8b4681cf8366 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -117,6 +117,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 		console_verbose();
 		hung_task_show_lock = true;
 		hung_task_call_panic = true;
+	} else {
+		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 	}

 	/*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 54b74fabf40c7bd88633b717423403add6911d8c..b53018f877ba2444eeddfaad87178031cbcb6188 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4801,11 +4801,14 @@ static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)

 		if (match_held_lock(hlock, lock)) {
 			/*
-			 * Grab 16bits of randomness; this is sufficient to not
-			 * be guessable and still allows some pin nesting in
-			 * our u32 pin_count.
+			 * Grab 6bits of randomness; this is barely sufficient
+			 * to not be guessable and still allows some 32 levels
+			 * of pin nesting in our u12 pin_count.
 			 */
-			cookie.val = 1 + (prandom_u32() >> 16);
+			cookie.val = 1 + (prandom_u32() >> 26);
+			if (DEBUG_LOCKS_WARN_ON(hlock->pin_count + cookie.val >= 1 << 12))
+				return NIL_COOKIE;
+
 			hlock->pin_count += cookie.val;
 			return cookie;
 		}
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index baca699b94e91d0026e86700b6f8678a6629d111..9b9f3a4db771562a54ecd2b57dada3ed62fcc43d 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -96,13 +96,13 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
 #else
 #define MAX_LOCKDEP_ENTRIES	32768UL

-#define MAX_LOCKDEP_CHAINS_BITS	16
+#define MAX_LOCKDEP_CHAINS_BITS	17

 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
-#define MAX_STACK_TRACE_ENTRIES	524288UL
+#define MAX_STACK_TRACE_ENTRIES	1048576UL
 #define STACK_TRACE_HASH_SIZE	16384
 #endif

diff --git a/kernel/panic.c b/kernel/panic.c
index aef8872ba8435d93feacf48eecc337a100e0f472..b94890ddd996153d7548aba2c7ef000ae6e43336 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -223,13 +223,6 @@ void panic(const char *fmt, ...)
 		buf[len - 1] = '\0';

 	pr_emerg("Kernel panic - not syncing: %s\n", buf);
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-	/*
-	 * Avoid nested stack-dumping if a panic occurs during oops processing
-	 */
-	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
-		dump_stack();
-#endif

 	/*
 	 * If kgdb is enabled, give it a chance to run before we stop all
@@ -271,6 +264,14 @@ void panic(const char *fmt, ...)
 	 */
 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

+#ifdef CONFIG_DEBUG_BUGVERBOSE
+	/*
+	 * Avoid nested stack-dumping if a panic occurs during oops processing
+	 */
+	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
+		dump_stack();
+#endif
+
 	/* Call flush even twice. It tries harder with a single online CPU */
 	printk_safe_flush_on_panic();
 	kmsg_dump(KMSG_DUMP_PANIC);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2d95dc3f46444ece3e21feee786be746c131b512..65391a7a1b7fb8fbb2a267043010d11d8cdb4041 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2667,7 +2667,7 @@ static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 {
 	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
-		if (WARN_ON_ONCE(cpu == smp_processor_id()))
+		if (WARN_ON_ONCE(p->on_cpu && cpu == smp_processor_id()))
 			return false;

 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index a50364df10543912c0bfc9f6f40d4c0470de5cc5..5af9a8c2c963a3cf5d0bb5d3e06c11eb21e6cf2d 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -615,7 +615,14 @@ static struct debug_obj_descr timer_debug_descr;

 static void *timer_debug_hint(void *addr)
 {
-	return ((struct timer_list *) addr)->function;
+	struct timer_list *timer = addr;
+
+	if (timer->function == delayed_work_timer_fn) {
+		struct delayed_work *work = from_timer(work, timer, timer);
+		return work->work.func;
+	}
+
+	return timer->function;
 }

 static bool timer_is_static_object(void *addr)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a4020c0b4508c9977702dd71823a1c9756217872..7f7366d344bf2228ad946642056b6144ae0a1061 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -107,6 +107,13 @@ config TRACING
 	select EVENT_TRACING
 	select TRACE_CLOCK

+config GLOBAL_TRACE_BUF_SIZE
+	int
+	prompt "Global ftrace buffer size (for trace_printk)" if EXPERT
+	range 0 4194034
+	default 1441792 # 16384 * 88 (sizeof(struct print_entry))
+	depends on TRACING
+
 config GENERIC_TRACER
 	bool
 	select TRACING
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f40d850ebabcc7bb60c47c57898d176fec09dd1e..280c533076cbea547b1089cf430a8a93653c8ba6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -658,9 +658,7 @@ int tracing_is_enabled(void)
  * to not have to wait for all that output. Anyway this can be
  * boot time and run time configurable.
  */
-#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
-
-static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
+static unsigned long		trace_buf_size = CONFIG_GLOBAL_TRACE_BUF_SIZE;

 /* trace_types holds a link list of available tracers. */
 static struct tracer		*trace_types __read_mostly;
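CONFIG_GLOBAL_TRACE_BUF_SIZE above only replaces the hard-coded initializer, so the default stays the historical 16384 entries of 88 bytes each, and the existing trace_buf_size= boot parameter still overrides it at runtime. A trivial compile-time restatement of the arithmetic in the Kconfig comment (sketch, not part of the patch):

	/* 16384 print entries * 88 bytes (sizeof(struct print_entry), per the comment above) */
	_Static_assert(16384 * 88 == 1441792, "Kconfig default matches the old TRACE_BUF_SIZE_DEFAULT");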
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5abb5b22ad1308844ee0dd790b3927cf5c2d5e28..f423f16fe87bdc24a01096afeb6f21f600ca93c9 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -433,6 +433,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
+		else
+			add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 		__this_cpu_write(soft_watchdog_warn, true);
 	} else
 		__this_cpu_write(soft_watchdog_warn, false);
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 247bf0b1582ca1cf352f006aa1fd5f689f8f5859..cce46cf75d762095f069cb0b76a82d2d29d5b259 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -154,6 +154,8 @@ static void watchdog_overflow_callback(struct perf_event *event,

 		if (hardlockup_panic)
 			nmi_panic(regs, "Hard LOCKUP");
+		else
+			add_taint(TAINT_WARN, LOCKDEP_STILL_OK);

 		__this_cpu_write(hard_watchdog_warn, true);
 		return;
diff --git a/mm/slub.c b/mm/slub.c
index d4177aecedf6bb7cfd45b74b61b8ce2243eebc68..9fa981f4a1cf9d7b9efdb4132e209e848f9b1974 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -538,7 +538,7 @@ static void print_section(char *level, char *text, u8 *addr,
 			  unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+	print_hex_dump(level, text, DUMP_PREFIX_OFFSET, 16, 1, addr,
 			length, 1);
 	metadata_access_disable();
 }
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 265a61d011dfaa7ec0f8fb8aaede920784f690c9..940e237a26b43f3f2c01b137d6cd933650bd49cf 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -437,7 +437,12 @@ static void dev_watchdog(struct timer_list *t)
 			}
 		}

-		if (some_queue_timedout) {
+		/* The noise is pissing off our CI and upstream doesn't
+		 * move on the bug report:
+		 *
+		 * https://bugzilla.kernel.org/show_bug.cgi?id=196399
+		 */
+		if (some_queue_timedout && 0) {
 			trace_net_dev_xmit_timeout(dev, i);
 			WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
 				  dev->name, netdev_drivername(dev), i);
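The soft/hard lockup hunks above, together with the earlier kernel/hung_task.c change, add TAINT_WARN ('W', bit 9) on their non-panic paths, so the taint mask records that a lockup or hung task was reported even when the machine is configured to keep running. A minimal userspace check a test harness could run afterwards (sketch, hypothetical code, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long taint = 0;
		FILE *f = fopen("/proc/sys/kernel/tainted", "r");

		if (f && fscanf(f, "%lu", &taint) == 1)
			printf("TAINT_WARN: %s\n", (taint & (1UL << 9)) ? "set" : "clear");
		if (f)
			fclose(f);
		return 0;
	}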